signature
stringlengths 8
3.44k
| body
stringlengths 0
1.41M
| docstring
stringlengths 1
122k
| id
stringlengths 5
17
|
|---|---|---|---|
@njit<EOL>def func_two_prime(x):
|
return <NUM_LIT:4>*np.cos(<NUM_LIT:4>*(x - <NUM_LIT:1>/<NUM_LIT:4>)) + <NUM_LIT:20>*x**<NUM_LIT> + <NUM_LIT:1><EOL>
|
Derivative for func_two.
|
f5112:m4
|
@njit<EOL>def func_two_prime2(x):
|
return <NUM_LIT>*x**<NUM_LIT> - <NUM_LIT:16>*np.sin(<NUM_LIT:4>*(x - <NUM_LIT:1>/<NUM_LIT:4>))<EOL>
|
Second order derivative for func_two
|
f5112:m5
|
@njit<EOL>def f(x):
|
return -(x + <NUM_LIT>)**<NUM_LIT:2> + <NUM_LIT:1.0><EOL>
|
A function for testing on.
|
f5113:m0
|
@njit<EOL>def g(x, y):
|
return -x**<NUM_LIT:2> + y<EOL>
|
A multivariate function for testing on.
|
f5113:m2
|
@njit<EOL>def brent_max(func, a, b, args=(), xtol=<NUM_LIT>, maxiter=<NUM_LIT>):
|
if not np.isfinite(a):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if not np.isfinite(b):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if not a < b:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>maxfun = maxiter<EOL>status_flag = <NUM_LIT:0><EOL>sqrt_eps = np.sqrt(<NUM_LIT>)<EOL>golden_mean = <NUM_LIT:0.5> * (<NUM_LIT> - np.sqrt(<NUM_LIT>))<EOL>fulc = a + golden_mean * (b - a)<EOL>nfc, xf = fulc, fulc<EOL>rat = e = <NUM_LIT:0.0><EOL>x = xf<EOL>fx = -func(x, *args)<EOL>num = <NUM_LIT:1><EOL>ffulc = fnfc = fx<EOL>xm = <NUM_LIT:0.5> * (a + b)<EOL>tol1 = sqrt_eps * np.abs(xf) + xtol / <NUM_LIT><EOL>tol2 = <NUM_LIT> * tol1<EOL>while (np.abs(xf - xm) > (tol2 - <NUM_LIT:0.5> * (b - a))):<EOL><INDENT>golden = <NUM_LIT:1><EOL>if np.abs(e) > tol1:<EOL><INDENT>golden = <NUM_LIT:0><EOL>r = (xf - nfc) * (fx - ffulc)<EOL>q = (xf - fulc) * (fx - fnfc)<EOL>p = (xf - fulc) * q - (xf - nfc) * r<EOL>q = <NUM_LIT> * (q - r)<EOL>if q > <NUM_LIT:0.0>:<EOL><INDENT>p = -p<EOL><DEDENT>q = np.abs(q)<EOL>r = e<EOL>e = rat<EOL>if ((np.abs(p) < np.abs(<NUM_LIT:0.5>*q*r)) and (p > q*(a - xf)) and<EOL>(p < q * (b - xf))):<EOL><INDENT>rat = (p + <NUM_LIT:0.0>) / q<EOL>x = xf + rat<EOL>if ((x - a) < tol2) or ((b - x) < tol2):<EOL><INDENT>si = np.sign(xm - xf) + ((xm - xf) == <NUM_LIT:0>)<EOL>rat = tol1 * si<EOL><DEDENT><DEDENT>else: <EOL><INDENT>golden = <NUM_LIT:1><EOL><DEDENT><DEDENT>if golden: <EOL><INDENT>if xf >= xm:<EOL><INDENT>e = a - xf<EOL><DEDENT>else:<EOL><INDENT>e = b - xf<EOL><DEDENT>rat = golden_mean*e<EOL><DEDENT>if rat == <NUM_LIT:0>:<EOL><INDENT>si = np.sign(rat) + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>si = np.sign(rat)<EOL><DEDENT>x = xf + si * np.maximum(np.abs(rat), tol1)<EOL>fu = -func(x, *args)<EOL>num += <NUM_LIT:1><EOL>if fu <= fx:<EOL><INDENT>if x >= xf:<EOL><INDENT>a = xf<EOL><DEDENT>else:<EOL><INDENT>b = xf<EOL><DEDENT>fulc, ffulc = nfc, fnfc<EOL>nfc, fnfc = xf, fx<EOL>xf, fx = x, fu<EOL><DEDENT>else:<EOL><INDENT>if x < xf:<EOL><INDENT>a = 
x<EOL><DEDENT>else:<EOL><INDENT>b = x<EOL><DEDENT>if (fu <= fnfc) or (nfc == xf):<EOL><INDENT>fulc, ffulc = nfc, fnfc<EOL>nfc, fnfc = x, fu<EOL><DEDENT>elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):<EOL><INDENT>fulc, ffulc = x, fu<EOL><DEDENT><DEDENT>xm = <NUM_LIT:0.5> * (a + b)<EOL>tol1 = sqrt_eps * np.abs(xf) + xtol / <NUM_LIT><EOL>tol2 = <NUM_LIT> * tol1<EOL>if num >= maxfun:<EOL><INDENT>status_flag = <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT>fval = -fx<EOL>info = status_flag, num<EOL>return xf, fval, info<EOL>
|
Uses a jitted version of the maximization routine from SciPy's fminbound.
The algorithm is identical except that it's been switched to maximization
rather than minimization, and the tests for convergence have been stripped
out to allow for jit compilation.
Note that the input function `func` must be jitted or the call will fail.
Parameters
----------
func : jitted function
a : scalar
Lower bound for search
b : scalar
Upper bound for search
args : tuple, optional
Extra arguments passed to the objective function.
maxiter : int, optional
Maximum number of iterations to perform.
xtol : float, optional
Absolute error in solution `xopt` acceptable for convergence.
Returns
-------
xf : float
The maximizer
fval : float
The maximum value attained
info : tuple
A tuple of the form (status_flag, num_iter). Here status_flag
indicates whether or not the maximum number of function calls was
attained. A value of 0 implies that the maximum was not hit.
The value `num_iter` is the number of function calls.
Example
-------
```
@njit
def f(x):
return -(x + 2.0)**2 + 1.0
xf, fval, info = brent_max(f, -2, 2)
```
|
f5115:m0
|
@njit<EOL>def nelder_mead(fun, x0, bounds=np.array([[], []]).T, args=(), tol_f=<NUM_LIT>,<EOL>tol_x=<NUM_LIT>, max_iter=<NUM_LIT:1000>):
|
vertices = _initialize_simplex(x0, bounds)<EOL>results = _nelder_mead_algorithm(fun, vertices, bounds, args=args,<EOL>tol_f=tol_f, tol_x=tol_x,<EOL>max_iter=max_iter)<EOL>return results<EOL>
|
.. highlight:: none
Maximize a scalar-valued function with one or more variables using the
Nelder-Mead method.
This function is JIT-compiled in `nopython` mode using Numba.
Parameters
----------
fun : callable
The objective function to be maximized: `fun(x, *args) -> float`
where x is an 1-D array with shape (n,) and args is a tuple of the
fixed parameters needed to completely specify the function. This
function must be JIT-compiled in `nopython` mode using Numba.
x0 : ndarray(float, ndim=1)
Initial guess. Array of real elements of size (n,), where ‘n’ is the
number of independent variables.
bounds: ndarray(float, ndim=2), optional
Bounds for each variable for proposed solution, encoded as a sequence
of (min, max) pairs for each element in x. The default option is used
to specify no bounds on x.
args : tuple, optional
Extra arguments passed to the objective function.
tol_f : scalar(float), optional(default=1e-10)
Tolerance to be used for the function value convergence test.
tol_x : scalar(float), optional(default=1e-10)
Tolerance to be used for the function domain convergence test.
max_iter : scalar(float), optional(default=1000)
The maximum number of allowed iterations.
Returns
----------
results : namedtuple
A namedtuple containing the following items:
::
"x" : Approximate local maximizer
"fun" : Approximate local maximum value
"success" : 1 if the algorithm successfully terminated, 0 otherwise
"nit" : Number of iterations
"final_simplex" : Vertices of the final simplex
Examples
--------
>>> @njit
... def rosenbrock(x):
... return -(100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0])**2)
...
>>> x0 = np.array([-2, 1])
>>> qe.optimize.nelder_mead(rosenbrock, x0)
results(x=array([0.99999814, 0.99999756]), fun=-1.6936258239463265e-10,
success=True, nit=110,
final_simplex=array([[0.99998652, 0.9999727],
[1.00000218, 1.00000301],
[0.99999814, 0.99999756]]))
Notes
--------
This algorithm has a long history of successful use in applications, but it
will usually be slower than an algorithm that uses first or second
derivative information. In practice, it can have poor performance in
high-dimensional problems and is not robust to minimizing complicated
functions. Additionally, there currently is no complete theory describing
when the algorithm will successfully converge to the minimum, or how fast
it will if it does.
References
----------
.. [1] J. C. Lagarias, J. A. Reeds, M. H. Wright and P. E. Wright,
Convergence Properties of the Nelder–Mead Simplex Method in Low
Dimensions, SIAM. J. Optim. 9, 112–147 (1998).
.. [2] S. Singer and S. Singer, Efficient implementation of the Nelder–Mead
search algorithm, Appl. Numer. Anal. Comput. Math., vol. 1, no. 2,
pp. 524–534, 2004.
.. [3] J. A. Nelder and R. Mead, A simplex method for function
minimization, Comput. J. 7, 308–313 (1965).
.. [4] Gao, F. and Han, L., Implementing the Nelder-Mead simplex algorithm
with adaptive parameters, Comput Optim Appl (2012) 51: 259.
.. [5] http://www.scholarpedia.org/article/Nelder-Mead_algorithm
.. [6] http://www.brnt.eu/phd/node10.html#SECTION00622200000000000000
.. [7] Chase Coleman's tutorial on Nelder Mead
.. [8] SciPy's Nelder-Mead implementation
|
f5117:m0
|
@njit<EOL>def _nelder_mead_algorithm(fun, vertices, bounds=np.array([[], []]).T,<EOL>args=(), ρ=<NUM_LIT:1.>, χ=<NUM_LIT>, γ=<NUM_LIT:0.5>, σ=<NUM_LIT:0.5>, tol_f=<NUM_LIT>,<EOL>tol_x=<NUM_LIT>, max_iter=<NUM_LIT:1000>):
|
n = vertices.shape[<NUM_LIT:1>]<EOL>_check_params(ρ, χ, γ, σ, bounds, n)<EOL>nit = <NUM_LIT:0><EOL>ργ = ρ * γ<EOL>ρχ = ρ * χ<EOL>σ_n = σ ** n<EOL>f_val = np.empty(n+<NUM_LIT:1>, dtype=np.float64)<EOL>for i in range(n+<NUM_LIT:1>):<EOL><INDENT>f_val[i] = _neg_bounded_fun(fun, bounds, vertices[i], args=args)<EOL><DEDENT>sort_ind = f_val.argsort()<EOL>LV_ratio = <NUM_LIT:1><EOL>x_bar = vertices[sort_ind[:n]].sum(axis=<NUM_LIT:0>) / n<EOL>while True:<EOL><INDENT>shrink = False<EOL>fail = nit >= max_iter<EOL>best_val_idx = sort_ind[<NUM_LIT:0>]<EOL>worst_val_idx = sort_ind[n]<EOL>term_f = f_val[worst_val_idx] - f_val[best_val_idx] < tol_f<EOL>term_x = LV_ratio < tol_x<EOL>if term_x or term_f or fail:<EOL><INDENT>break<EOL><DEDENT>x_r = x_bar + ρ * (x_bar - vertices[worst_val_idx])<EOL>f_r = _neg_bounded_fun(fun, bounds, x_r, args=args)<EOL>if f_r >= f_val[best_val_idx] and f_r < f_val[sort_ind[n-<NUM_LIT:1>]]:<EOL><INDENT>vertices[worst_val_idx] = x_r<EOL>LV_ratio *= ρ<EOL><DEDENT>elif f_r < f_val[best_val_idx]:<EOL><INDENT>x_e = x_bar + χ * (x_r - x_bar)<EOL>f_e = _neg_bounded_fun(fun, bounds, x_e, args=args)<EOL>if f_e < f_r: <EOL><INDENT>vertices[worst_val_idx] = x_e<EOL>LV_ratio *= ρχ<EOL><DEDENT>else:<EOL><INDENT>vertices[worst_val_idx] = x_r<EOL>LV_ratio *= ρ<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if f_r < f_val[worst_val_idx]: <EOL><INDENT>x_c = x_bar + γ * (x_r - x_bar)<EOL>LV_ratio_update = ργ<EOL><DEDENT>else: <EOL><INDENT>x_c = x_bar - γ * (x_r - x_bar)<EOL>LV_ratio_update = γ<EOL><DEDENT>f_c = _neg_bounded_fun(fun, bounds, x_c, args=args)<EOL>if f_c < min(f_r, f_val[worst_val_idx]): <EOL><INDENT>vertices[worst_val_idx] = x_c<EOL>LV_ratio *= LV_ratio_update<EOL><DEDENT>else:<EOL><INDENT>shrink = True<EOL>for i in sort_ind[<NUM_LIT:1>:]:<EOL><INDENT>vertices[i] = vertices[best_val_idx] + σ *(vertices[i] - vertices[best_val_idx])<EOL>f_val[i] = _neg_bounded_fun(fun, bounds, vertices[i],<EOL>args=args)<EOL><DEDENT>sort_ind[<NUM_LIT:1>:] = 
f_val[sort_ind[<NUM_LIT:1>:]].argsort() + <NUM_LIT:1><EOL>x_bar = vertices[best_val_idx] + σ *(x_bar - vertices[best_val_idx]) +(vertices[worst_val_idx] - vertices[sort_ind[n]]) / n<EOL>LV_ratio *= σ_n<EOL><DEDENT><DEDENT>if not shrink: <EOL><INDENT>f_val[worst_val_idx] = _neg_bounded_fun(fun, bounds,<EOL>vertices[worst_val_idx],<EOL>args=args)<EOL>for i, j in enumerate(sort_ind):<EOL><INDENT>if f_val[worst_val_idx] < f_val[j]:<EOL><INDENT>sort_ind[i+<NUM_LIT:1>:] = sort_ind[i:-<NUM_LIT:1>]<EOL>sort_ind[i] = worst_val_idx<EOL>break<EOL><DEDENT><DEDENT>x_bar += (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n<EOL><DEDENT>nit += <NUM_LIT:1><EOL><DEDENT>return results(vertices[sort_ind[<NUM_LIT:0>]], -f_val[sort_ind[<NUM_LIT:0>]], not fail, nit,<EOL>vertices)<EOL>
|
.. highlight:: none
Implements the Nelder-Mead algorithm described in Lagarias et al. (1998)
modified to maximize instead of minimizing. JIT-compiled in `nopython`
mode using Numba.
Parameters
----------
fun : callable
The objective function to be maximized.
`fun(x, *args) -> float`
where x is an 1-D array with shape (n,) and args is a tuple of the
fixed parameters needed to completely specify the function. This
function must be JIT-compiled in `nopython` mode using Numba.
vertices : ndarray(float, ndim=2)
Initial simplex with shape (n+1, n) to be modified in-place.
args : tuple, optional
Extra arguments passed to the objective function.
ρ : scalar(float), optional(default=1.)
Reflection parameter. Must be strictly greater than 0.
χ : scalar(float), optional(default=2.)
Expansion parameter. Must be strictly greater than max(1, ρ).
γ : scalar(float), optional(default=0.5)
Contraction parameter. Must be stricly between 0 and 1.
σ : scalar(float), optional(default=0.5)
Shrinkage parameter. Must be strictly between 0 and 1.
tol_f : scalar(float), optional(default=1e-10)
Tolerance to be used for the function value convergence test.
tol_x : scalar(float), optional(default=1e-10)
Tolerance to be used for the function domain convergence test.
max_iter : scalar(float), optional(default=1000)
The maximum number of allowed iterations.
Returns
----------
results : namedtuple
A namedtuple containing the following items:
::
"x" : Approximate solution
"fun" : Approximate local maximum
"success" : 1 if successfully terminated, 0 otherwise
"nit" : Number of iterations
"final_simplex" : The vertices of the final simplex
|
f5117:m1
|
@njit<EOL>def _initialize_simplex(x0, bounds):
|
n = x0.size<EOL>vertices = np.empty((n + <NUM_LIT:1>, n), dtype=np.float64)<EOL>vertices[:] = x0<EOL>nonzdelt = <NUM_LIT><EOL>zdelt = <NUM_LIT><EOL>for i in range(n):<EOL><INDENT>if vertices[i + <NUM_LIT:1>, i] != <NUM_LIT:0.>:<EOL><INDENT>vertices[i + <NUM_LIT:1>, i] *= (<NUM_LIT:1> + nonzdelt)<EOL><DEDENT>else:<EOL><INDENT>vertices[i + <NUM_LIT:1>, i] = zdelt<EOL><DEDENT><DEDENT>return vertices<EOL>
|
Generates an initial simplex for the Nelder-Mead method. JIT-compiled in
`nopython` mode using Numba.
Parameters
----------
x0 : ndarray(float, ndim=1)
Initial guess. Array of real elements of size (n,), where ‘n’ is the
number of independent variables.
bounds: ndarray(float, ndim=2)
Sequence of (min, max) pairs for each element in x0.
Returns
----------
vertices : ndarray(float, ndim=2)
Initial simplex with shape (n+1, n).
|
f5117:m2
|
@njit<EOL>def _check_params(ρ, χ, γ, σ, bounds, n):
|
if ρ < <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if χ < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if χ < ρ:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if γ < <NUM_LIT:0> or γ > <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if σ < <NUM_LIT:0> or σ > <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if not (bounds.shape == (<NUM_LIT:0>, <NUM_LIT:2>) or bounds.shape == (n, <NUM_LIT:2>)):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if (np.atleast_2d(bounds)[:, <NUM_LIT:0>] > np.atleast_2d(bounds)[:, <NUM_LIT:1>]).any():<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>
|
Checks whether the parameters for the Nelder-Mead algorithm are valid.
JIT-compiled in `nopython` mode using Numba.
Parameters
----------
ρ : scalar(float)
Reflection parameter. Must be strictly greater than 0.
χ : scalar(float)
Expansion parameter. Must be strictly greater than max(1, ρ).
γ : scalar(float)
Contraction parameter. Must be stricly between 0 and 1.
σ : scalar(float)
Shrinkage parameter. Must be strictly between 0 and 1.
bounds: ndarray(float, ndim=2)
Sequence of (min, max) pairs for each element in x.
n : scalar(int)
Number of independent variables.
|
f5117:m3
|
@njit<EOL>def _check_bounds(x, bounds):
|
if bounds.shape == (<NUM_LIT:0>, <NUM_LIT:2>):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return ((np.atleast_2d(bounds)[:, <NUM_LIT:0>] <= x).all() and<EOL>(x <= np.atleast_2d(bounds)[:, <NUM_LIT:1>]).all())<EOL><DEDENT>
|
Checks whether `x` is within `bounds`. JIT-compiled in `nopython` mode
using Numba.
Parameters
----------
x : ndarray(float, ndim=1)
1-D array with shape (n,) of independent variables.
bounds: ndarray(float, ndim=2)
Sequence of (min, max) pairs for each element in x.
Returns
----------
bool
`True` if `x` is within `bounds`, `False` otherwise.
|
f5117:m4
|
@njit<EOL>def _neg_bounded_fun(fun, bounds, x, args=()):
|
if _check_bounds(x, bounds):<EOL><INDENT>return -fun(x, *args)<EOL><DEDENT>else:<EOL><INDENT>return np.inf<EOL><DEDENT>
|
Wrapper for bounding and taking the negative of `fun` for the
Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba.
Parameters
----------
fun : callable
The objective function to be minimized.
`fun(x, *args) -> float`
where x is an 1-D array with shape (n,) and args is a tuple of the
fixed parameters needed to completely specify the function. This
function must be JIT-compiled in `nopython` mode using Numba.
bounds: ndarray(float, ndim=2)
Sequence of (min, max) pairs for each element in x.
x : ndarray(float, ndim=1)
1-D array with shape (n,) of independent variables at which `fun` is
to be evaluated.
args : tuple, optional
Extra arguments passed to the objective function.
Returns
----------
scalar
`-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise.
|
f5117:m5
|
@njit<EOL>def _results(r):
|
x, funcalls, iterations, flag = r<EOL>return results(x, funcalls, iterations, flag == <NUM_LIT:0>)<EOL>
|
r"""Select from a tuple of(root, funccalls, iterations, flag)
|
f5118:m0
|
@njit<EOL>def newton(func, x0, fprime, args=(), tol=<NUM_LIT>, maxiter=<NUM_LIT:50>,<EOL>disp=True):
|
if tol <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if maxiter < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>p0 = <NUM_LIT:1.0> * x0<EOL>funcalls = <NUM_LIT:0><EOL>status = _ECONVERR<EOL>for itr in range(maxiter):<EOL><INDENT>fval = func(p0, *args)<EOL>funcalls += <NUM_LIT:1><EOL>if fval == <NUM_LIT:0>:<EOL><INDENT>status = _ECONVERGED<EOL>p = p0<EOL>itr -= <NUM_LIT:1><EOL>break<EOL><DEDENT>fder = fprime(p0, *args)<EOL>funcalls += <NUM_LIT:1><EOL>if fder == <NUM_LIT:0>:<EOL><INDENT>p = p0<EOL>break<EOL><DEDENT>newton_step = fval / fder<EOL>p = p0 - newton_step<EOL>if abs(p - p0) < tol:<EOL><INDENT>status = _ECONVERGED<EOL>break<EOL><DEDENT>p0 = p<EOL><DEDENT>if disp and status == _ECONVERR:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise RuntimeError(msg)<EOL><DEDENT>return _results((p, funcalls, itr + <NUM_LIT:1>, status))<EOL>
|
Find a zero from the Newton-Raphson method using the jitted version of
Scipy's newton for scalars. Note that this does not provide an alternative
method such as secant. Thus, it is important that `fprime` can be provided.
Note that `func` and `fprime` must be jitted via Numba.
They are recommended to be `njit` for performance.
Parameters
----------
func : callable and jitted
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : callable and jitted
The derivative of the function (when available and convenient).
args : tuple, optional(default=())
Extra arguments to be used in the function call.
tol : float, optional(default=1.48e-8)
The allowable error of the zero value.
maxiter : int, optional(default=50)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge
Returns
-------
results : namedtuple
A namedtuple containing the following items:
::
root - Estimated location where function is zero.
function_calls - Number of times the function was called.
iterations - Number of iterations needed to find the root.
converged - True if the routine converged
|
f5118:m1
|
@njit<EOL>def newton_halley(func, x0, fprime, fprime2, args=(), tol=<NUM_LIT>,<EOL>maxiter=<NUM_LIT:50>, disp=True):
|
if tol <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if maxiter < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>p0 = <NUM_LIT:1.0> * x0<EOL>funcalls = <NUM_LIT:0><EOL>status = _ECONVERR<EOL>for itr in range(maxiter):<EOL><INDENT>fval = func(p0, *args)<EOL>funcalls += <NUM_LIT:1><EOL>if fval == <NUM_LIT:0>:<EOL><INDENT>status = _ECONVERGED<EOL>p = p0<EOL>itr -= <NUM_LIT:1><EOL>break<EOL><DEDENT>fder = fprime(p0, *args)<EOL>funcalls += <NUM_LIT:1><EOL>if fder == <NUM_LIT:0>:<EOL><INDENT>p = p0<EOL>break<EOL><DEDENT>newton_step = fval / fder<EOL>fder2 = fprime2(p0, *args)<EOL>p = p0 - newton_step / (<NUM_LIT:1.0> - <NUM_LIT:0.5> * newton_step * fder2 / fder)<EOL>if abs(p - p0) < tol:<EOL><INDENT>status = _ECONVERGED<EOL>break<EOL><DEDENT>p0 = p<EOL><DEDENT>if disp and status == _ECONVERR:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise RuntimeError(msg)<EOL><DEDENT>return _results((p, funcalls, itr + <NUM_LIT:1>, status))<EOL>
|
Find a zero from Halley's method using the jitted version of
Scipy's.
`func`, `fprime`, `fprime2` must be jitted via Numba.
Parameters
----------
func : callable and jitted
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : callable and jitted
The derivative of the function (when available and convenient).
fprime2 : callable and jitted
The second order derivative of the function
args : tuple, optional(default=())
Extra arguments to be used in the function call.
tol : float, optional(default=1.48e-8)
The allowable error of the zero value.
maxiter : int, optional(default=50)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge
Returns
-------
results : namedtuple
A namedtuple containing the following items:
::
root - Estimated location where function is zero.
function_calls - Number of times the function was called.
iterations - Number of iterations needed to find the root.
converged - True if the routine converged
|
f5118:m2
|
@njit<EOL>def newton_secant(func, x0, args=(), tol=<NUM_LIT>, maxiter=<NUM_LIT:50>,<EOL>disp=True):
|
if tol <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if maxiter < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>p0 = <NUM_LIT:1.0> * x0<EOL>funcalls = <NUM_LIT:0><EOL>status = _ECONVERR<EOL>if x0 >= <NUM_LIT:0>:<EOL><INDENT>p1 = x0 * (<NUM_LIT:1> + <NUM_LIT>) + <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>p1 = x0 * (<NUM_LIT:1> + <NUM_LIT>) - <NUM_LIT><EOL>q0 = func(p0, *args)<EOL><DEDENT>funcalls += <NUM_LIT:1><EOL>q1 = func(p1, *args)<EOL>funcalls += <NUM_LIT:1><EOL>for itr in range(maxiter):<EOL><INDENT>if q1 == q0:<EOL><INDENT>p = (p1 + p0) / <NUM_LIT><EOL>status = _ECONVERGED<EOL>break<EOL><DEDENT>else:<EOL><INDENT>p = p1 - q1 * (p1 - p0) / (q1 - q0)<EOL><DEDENT>if np.abs(p - p1) < tol:<EOL><INDENT>status = _ECONVERGED<EOL>break<EOL><DEDENT>p0 = p1<EOL>q0 = q1<EOL>p1 = p<EOL>q1 = func(p1, *args)<EOL>funcalls += <NUM_LIT:1><EOL><DEDENT>if disp and status == _ECONVERR:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise RuntimeError(msg)<EOL><DEDENT>return _results((p, funcalls, itr + <NUM_LIT:1>, status))<EOL>
|
Find a zero from the secant method using the jitted version of
Scipy's secant method.
Note that `func` must be jitted via Numba.
Parameters
----------
func : callable and jitted
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
args : tuple, optional(default=())
Extra arguments to be used in the function call.
tol : float, optional(default=1.48e-8)
The allowable error of the zero value.
maxiter : int, optional(default=50)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple
A namedtuple containing the following items:
::
root - Estimated location where function is zero.
function_calls - Number of times the function was called.
iterations - Number of iterations needed to find the root.
converged - True if the routine converged
|
f5118:m3
|
@njit<EOL>def _bisect_interval(a, b, fa, fb):
|
if fa*fb > <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>root = <NUM_LIT:0.0><EOL>status = _ECONVERR<EOL>if fa == <NUM_LIT:0>:<EOL><INDENT>root = a<EOL>status = _ECONVERGED<EOL><DEDENT>if fb == <NUM_LIT:0>:<EOL><INDENT>root = b<EOL>status = _ECONVERGED<EOL><DEDENT>return root, status<EOL>
|
Conditional checks for intervals in methods involving bisection
|
f5118:m4
|
@njit<EOL>def bisect(f, a, b, args=(), xtol=_xtol,<EOL>rtol=_rtol, maxiter=_iter, disp=True):
|
if xtol <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if maxiter < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>xa = a * <NUM_LIT:1.0><EOL>xb = b * <NUM_LIT:1.0><EOL>fa = f(xa, *args)<EOL>fb = f(xb, *args)<EOL>funcalls = <NUM_LIT:2><EOL>root, status = _bisect_interval(xa, xb, fa, fb)<EOL>if status == _ECONVERGED:<EOL><INDENT>itr = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>dm = xb - xa<EOL>for itr in range(maxiter):<EOL><INDENT>dm *= <NUM_LIT:0.5><EOL>xm = xa + dm<EOL>fm = f(xm, *args)<EOL>funcalls += <NUM_LIT:1><EOL>if fm * fa >= <NUM_LIT:0>:<EOL><INDENT>xa = xm<EOL><DEDENT>if fm == <NUM_LIT:0> or abs(dm) < xtol + rtol * abs(xm):<EOL><INDENT>root = xm<EOL>status = _ECONVERGED<EOL>itr += <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT><DEDENT>if disp and status == _ECONVERR:<EOL><INDENT>raise RuntimeError("<STR_LIT>")<EOL><DEDENT>return _results((root, funcalls, itr, status))<EOL>
|
Find root of a function within an interval adapted from Scipy's bisect.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple
|
f5118:m5
|
@njit<EOL>def brentq(f, a, b, args=(), xtol=_xtol,<EOL>rtol=_rtol, maxiter=_iter, disp=True):
|
if xtol <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if maxiter < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>xpre = a * <NUM_LIT:1.0><EOL>xcur = b * <NUM_LIT:1.0><EOL>fpre = f(xpre, *args)<EOL>fcur = f(xcur, *args)<EOL>funcalls = <NUM_LIT:2><EOL>root, status = _bisect_interval(xpre, xcur, fpre, fcur)<EOL>if status == _ECONVERGED:<EOL><INDENT>itr = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>for itr in range(maxiter):<EOL><INDENT>if fpre * fcur < <NUM_LIT:0>:<EOL><INDENT>xblk = xpre<EOL>fblk = fpre<EOL>spre = scur = xcur - xpre<EOL><DEDENT>if abs(fblk) < abs(fcur):<EOL><INDENT>xpre = xcur<EOL>xcur = xblk<EOL>xblk = xpre<EOL>fpre = fcur<EOL>fcur = fblk<EOL>fblk = fpre<EOL><DEDENT>delta = (xtol + rtol * abs(xcur)) / <NUM_LIT:2><EOL>sbis = (xblk - xcur) / <NUM_LIT:2><EOL>if fcur == <NUM_LIT:0> or abs(sbis) < delta:<EOL><INDENT>status = _ECONVERGED<EOL>root = xcur<EOL>itr += <NUM_LIT:1><EOL>break<EOL><DEDENT>if abs(spre) > delta and abs(fcur) < abs(fpre):<EOL><INDENT>if xpre == xblk:<EOL><INDENT>stry = -fcur * (xcur - xpre) / (fcur - fpre)<EOL><DEDENT>else:<EOL><INDENT>dpre = (fpre - fcur) / (xpre - xcur)<EOL>dblk = (fblk - fcur) / (xblk - xcur)<EOL>stry = -fcur * (fblk * dblk - fpre * dpre) /(dblk * dpre * (fblk - fpre))<EOL><DEDENT>if (<NUM_LIT:2> * abs(stry) < min(abs(spre), <NUM_LIT:3> * abs(sbis) - delta)):<EOL><INDENT>spre = scur<EOL>scur = stry<EOL><DEDENT>else:<EOL><INDENT>spre = sbis<EOL>scur = sbis<EOL><DEDENT><DEDENT>else:<EOL><INDENT>spre = sbis<EOL>scur = sbis<EOL><DEDENT>xpre = xcur<EOL>fpre = fcur<EOL>if (abs(scur) > delta):<EOL><INDENT>xcur += scur<EOL><DEDENT>else:<EOL><INDENT>xcur += (delta if sbis > <NUM_LIT:0> else -delta)<EOL><DEDENT>fcur = f(xcur, *args)<EOL>funcalls += <NUM_LIT:1><EOL><DEDENT><DEDENT>if disp and status == _ECONVERR:<EOL><INDENT>raise RuntimeError("<STR_LIT>")<EOL><DEDENT>return _results((root, funcalls, itr, status))<EOL>
|
Find a root of a function in a bracketing interval using Brent's method
adapted from Scipy's brentq.
Uses the classic Brent's method to find a zero of the function `f` on
the sign changing interval [a , b].
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple
|
f5118:m6
|
def set_params(self):
|
<EOL>ma_poly = np.asarray(self._theta)<EOL>self.ma_poly = np.insert(ma_poly, <NUM_LIT:0>, <NUM_LIT:1>) <EOL>if np.isscalar(self._phi):<EOL><INDENT>ar_poly = np.array(-self._phi)<EOL><DEDENT>else:<EOL><INDENT>ar_poly = -np.asarray(self._phi)<EOL><DEDENT>self.ar_poly = np.insert(ar_poly, <NUM_LIT:0>, <NUM_LIT:1>) <EOL>if len(self.ar_poly) < len(self.ma_poly):<EOL><INDENT>temp = np.zeros(len(self.ma_poly) - len(self.ar_poly))<EOL>self.ar_poly = np.hstack((self.ar_poly, temp))<EOL><DEDENT>
|
r"""
Internally, scipy.signal works with systems of the form
.. math::
ar_{poly}(L) X_t = ma_{poly}(L) \epsilon_t
where L is the lag operator. To match this, we set
.. math::
ar_{poly} = (1, -\phi_1, -\phi_2,..., -\phi_p)
ma_{poly} = (1, \theta_1, \theta_2,..., \theta_q)
In addition, ar_poly must be at least as long as ma_poly.
This can be achieved by padding it out with zeros when required.
|
f5119:c0:m8
|
def impulse_response(self, impulse_length=<NUM_LIT:30>):
|
from scipy.signal import dimpulse<EOL>sys = self.ma_poly, self.ar_poly, <NUM_LIT:1><EOL>times, psi = dimpulse(sys, n=impulse_length)<EOL>psi = psi[<NUM_LIT:0>].flatten() <EOL>return psi<EOL>
|
Get the impulse response corresponding to our model.
Returns
-------
psi : array_like(float)
psi[j] is the response at lag j of the impulse response.
We take psi[0] as unity.
|
f5119:c0:m9
|
def spectral_density(self, two_pi=True, res=<NUM_LIT>):
|
from scipy.signal import freqz<EOL>w, h = freqz(self.ma_poly, self.ar_poly, worN=res, whole=two_pi)<EOL>spect = h * conj(h) * self.sigma**<NUM_LIT:2><EOL>return w, spect<EOL>
|
r"""
Compute the spectral density function. The spectral density is
the discrete time Fourier transform of the autocovariance
function. In particular,
.. math::
f(w) = \sum_k \gamma(k) \exp(-ikw)
where gamma is the autocovariance function and the sum is over
the set of all integers.
Parameters
----------
two_pi : Boolean, optional
Compute the spectral density function over :math:`[0, \pi]` if
two_pi is False and :math:`[0, 2 \pi]` otherwise. Default value is
True
res : scalar or array_like(int), optional(default=1200)
If res is a scalar then the spectral density is computed at
`res` frequencies evenly spaced around the unit circle, but
if res is an array then the function computes the response
at the frequencies given by the array
Returns
-------
w : array_like(float)
The normalized frequencies at which h was computed, in
radians/sample
spect : array_like(float)
The frequency response
|
f5119:c0:m10
|
def autocovariance(self, num_autocov=<NUM_LIT:16>):
|
spect = self.spectral_density()[<NUM_LIT:1>]<EOL>acov = np.fft.ifft(spect).real<EOL>return acov[:num_autocov]<EOL>
|
Compute the autocovariance function from the ARMA parameters
over the integers range(num_autocov) using the spectral density
and the inverse Fourier transform.
Parameters
----------
num_autocov : scalar(int), optional(default=16)
The number of autocovariances to calculate
|
f5119:c0:m11
|
def simulation(self, ts_length=<NUM_LIT>, random_state=None):
|
from scipy.signal import dlsim<EOL>random_state = check_random_state(random_state)<EOL>sys = self.ma_poly, self.ar_poly, <NUM_LIT:1><EOL>u = random_state.randn(ts_length, <NUM_LIT:1>) * self.sigma<EOL>vals = dlsim(sys, u)[<NUM_LIT:1>]<EOL>return vals.flatten()<EOL>
|
Compute a simulated sample path assuming Gaussian shocks.
Parameters
----------
ts_length : scalar(int), optional(default=90)
Number of periods to simulate for
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
vals : array_like(float)
A simulation of the model that corresponds to this class
|
f5119:c0:m12
|
def _integrate_fixed_trajectory(self, h, T, step, relax):
|
<EOL>solution = np.hstack((self.t, self.y))<EOL>while self.successful():<EOL><INDENT>self.integrate(self.t + h, step, relax)<EOL>current_step = np.hstack((self.t, self.y))<EOL>solution = np.vstack((solution, current_step))<EOL>if (h > <NUM_LIT:0>) and (self.t >= T):<EOL><INDENT>break<EOL><DEDENT>elif (h < <NUM_LIT:0>) and (self.t <= T):<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>return solution<EOL>
|
Generates a solution trajectory of fixed length.
|
f5120:c0:m1
|
def _integrate_variable_trajectory(self, h, g, tol, step, relax):
|
<EOL>solution = np.hstack((self.t, self.y))<EOL>while self.successful():<EOL><INDENT>self.integrate(self.t + h, step, relax)<EOL>current_step = np.hstack((self.t, self.y))<EOL>solution = np.vstack((solution, current_step))<EOL>if g(self.t, self.y, *self.f_params) < tol:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>return solution<EOL>
|
Generates a solution trajectory of variable length.
|
f5120:c0:m2
|
def _initialize_integrator(self, t0, y0, integrator, **kwargs):
|
<EOL>self.set_initial_value(y0, t0)<EOL>self.set_integrator(integrator, **kwargs)<EOL>
|
Initializes the integrator prior to integration.
|
f5120:c0:m3
|
def compute_residual(self, traj, ti, k=<NUM_LIT:3>, ext=<NUM_LIT:2>):
|
<EOL>soln = self.interpolate(traj, ti, k, <NUM_LIT:0>, ext)<EOL>deriv = self.interpolate(traj, ti, k, <NUM_LIT:1>, ext)<EOL>T = ti.size<EOL>rhs_ode = np.vstack(self.f(ti[i], soln[i, <NUM_LIT:1>:], *self.f_params)<EOL>for i in range(T))<EOL>rhs_ode = np.hstack((ti[:, np.newaxis], rhs_ode))<EOL>residual = deriv - rhs_ode<EOL>return residual<EOL>
|
r"""
The residual is the difference between the derivative of the B-spline
approximation of the solution trajectory and the right-hand side of the
original ODE evaluated along the approximated solution trajectory.
Parameters
----------
traj : array_like (float)
Solution trajectory providing the data points for constructing the
B-spline representation.
ti : array_like (float)
Array of values for the independent variable at which to
interpolate the value of the B-spline.
k : int, optional(default=3)
Degree of the desired B-spline. Degree must satisfy
:math:`1 \le k \le 5`.
ext : int, optional(default=2)
Controls the value of returned elements for outside the
original knot sequence provided by traj. For extrapolation, set
`ext=0`; `ext=1` returns zero; `ext=2` raises a `ValueError`.
Returns
-------
residual : array (float)
Difference between the derivative of the B-spline approximation
of the solution trajectory and the right-hand side of the ODE
evaluated along the approximated solution trajectory.
|
f5120:c0:m4
|
def solve(self, t0, y0, h=<NUM_LIT:1.0>, T=None, g=None, tol=None,<EOL>integrator='<STR_LIT>', step=False, relax=False, **kwargs):
|
self._initialize_integrator(t0, y0, integrator, **kwargs)<EOL>if (g is not None) and (tol is not None):<EOL><INDENT>soln = self._integrate_variable_trajectory(h, g, tol, step, relax)<EOL><DEDENT>elif T is not None:<EOL><INDENT>soln = self._integrate_fixed_trajectory(h, T, step, relax)<EOL><DEDENT>else:<EOL><INDENT>mesg = "<STR_LIT>"<EOL>raise ValueError(mesg)<EOL><DEDENT>return soln<EOL>
|
r"""
Solve the IVP by integrating the ODE given some initial condition.
Parameters
----------
t0 : float
Initial condition for the independent variable.
y0 : array_like (float, shape=(n,))
Initial condition for the dependent variables.
h : float, optional(default=1.0)
Step-size for computing the solution. Can be positive or negative
depending on the desired direction of integration.
T : int, optional(default=None)
Terminal value for the independent variable. One of either `T`
or `g` must be specified.
g : callable ``g(t, y, f_args)``, optional(default=None)
Provides a stopping condition for the integration. If specified
user must also specify a stopping tolerance, `tol`.
tol : float, optional (default=None)
Stopping tolerance for the integration. Only required if `g` is
also specified.
integrator : str, optional(default='dopri5')
Must be one of 'vode', 'lsoda', 'dopri5', or 'dop853'
step : bool, optional(default=False)
Allows access to internal steps for those solvers that use adaptive
step size routines. Currently only 'vode', 'zvode', and 'lsoda'
support `step=True`.
relax : bool, optional(default=False)
Currently only 'vode', 'zvode', and 'lsoda' support `relax=True`.
**kwargs : dict, optional(default=None)
Dictionary of integrator specific keyword arguments. See the
Notes section of the docstring for `scipy.integrate.ode` for a
complete description of solver specific keyword arguments.
Returns
-------
solution: ndarray (float)
Simulated solution trajectory.
|
f5120:c0:m5
|
def interpolate(self, traj, ti, k=<NUM_LIT:3>, der=<NUM_LIT:0>, ext=<NUM_LIT:2>):
|
<EOL>u = traj[:, <NUM_LIT:0>]<EOL>n = traj.shape[<NUM_LIT:1>]<EOL>x = [traj[:, i] for i in range(<NUM_LIT:1>, n)]<EOL>tck, t = interpolate.splprep(x, u=u, k=k, s=<NUM_LIT:0>)<EOL>out = interpolate.splev(ti, tck, der, ext)<EOL>interp_traj = np.hstack((ti[:, np.newaxis], np.array(out).T))<EOL>return interp_traj<EOL>
|
r"""
Parametric B-spline interpolation in N-dimensions.
Parameters
----------
traj : array_like (float)
Solution trajectory providing the data points for constructing the
B-spline representation.
ti : array_like (float)
Array of values for the independent variable at which to
interpolate the value of the B-spline.
k : int, optional(default=3)
Degree of the desired B-spline. Degree must satisfy
:math:`1 \le k \le 5`.
der : int, optional(default=0)
The order of derivative of the spline to compute (must be less
than or equal to `k`).
ext : int, optional(default=2) Controls the value of returned elements
for outside the original knot sequence provided by traj. For
extrapolation, set `ext=0`; `ext=1` returns zero; `ext=2` raises a
`ValueError`.
Returns
-------
interp_traj: ndarray (float)
The interpolated trajectory.
|
f5120:c0:m6
|
def whitener_lss(self):
|
K = self.K_infinity<EOL>n, k, m, l = self.ss.n, self.ss.k, self.ss.m, self.ss.l<EOL>A, C, G, H = self.ss.A, self.ss.C, self.ss.G, self.ss.H<EOL>Atil = np.vstack([np.hstack([A, np.zeros((n, n)), np.zeros((n, l))]),<EOL>np.hstack([dot(K, G), A-dot(K, G), dot(K, H)]),<EOL>np.zeros((l, <NUM_LIT:2>*n + l))])<EOL>Ctil = np.vstack([np.hstack([C, np.zeros((n, l))]),<EOL>np.zeros((n, m+l)),<EOL>np.hstack([np.zeros((l, m)), np.eye(l)])])<EOL>Gtil = np.hstack([G, -G, H])<EOL>whitened_lss = LinearStateSpace(Atil, Ctil, Gtil)<EOL>self.whitened_lss = whitened_lss<EOL>return whitened_lss<EOL>
|
r"""
This function takes the linear state space system
that is an input to the Kalman class and it converts
that system to the time-invariant whitener representation
given by
.. math::
\tilde{x}_{t+1}^* = \tilde{A} \tilde{x} + \tilde{C} v
a = \tilde{G} \tilde{x}
where
.. math::
\tilde{x}_t = [x_{t}, \hat{x}_{t}, v_{t}]
and
.. math::
\tilde{A} =
\begin{bmatrix}
A & 0 & 0 \\
KG & A-KG & KH \\
0 & 0 & 0 \\
\end{bmatrix}
.. math::
\tilde{C} =
\begin{bmatrix}
C & 0 \\
0 & 0 \\
0 & I \\
\end{bmatrix}
.. math::
\tilde{G} =
\begin{bmatrix}
G & -G & H \\
\end{bmatrix}
with :math:`A, C, G, H` coming from the linear state space system
that defines the Kalman instance
Returns
-------
whitened_lss : LinearStateSpace
This is the linear state space system that represents
the whitened system
|
f5121:c0:m6
|
def prior_to_filtered(self, y):
|
<EOL>G, H = self.ss.G, self.ss.H<EOL>R = np.dot(H, H.T)<EOL>y = np.atleast_2d(y)<EOL>y.shape = self.ss.k, <NUM_LIT:1><EOL>E = dot(self.Sigma, G.T)<EOL>F = dot(dot(G, self.Sigma), G.T) + R<EOL>M = dot(E, inv(F))<EOL>self.x_hat = self.x_hat + dot(M, (y - dot(G, self.x_hat)))<EOL>self.Sigma = self.Sigma - dot(M, dot(G, self.Sigma))<EOL>
|
r"""
Updates the moments (x_hat, Sigma) of the time t prior to the
time t filtering distribution, using current measurement :math:`y_t`.
The updates are according to
.. math::
\hat{x}^F = \hat{x} + \Sigma G' (G \Sigma G' + R)^{-1}
(y - G \hat{x})
\Sigma^F = \Sigma - \Sigma G' (G \Sigma G' + R)^{-1} G
\Sigma
Parameters
----------
y : scalar or array_like(float)
The current measurement
|
f5121:c0:m7
|
def filtered_to_forecast(self):
|
<EOL>A, C = self.ss.A, self.ss.C<EOL>Q = np.dot(C, C.T)<EOL>self.x_hat = dot(A, self.x_hat)<EOL>self.Sigma = dot(A, dot(self.Sigma, A.T)) + Q<EOL>
|
Updates the moments of the time t filtering distribution to the
moments of the predictive distribution, which becomes the time
t+1 prior
|
f5121:c0:m8
|
def update(self, y):
|
self.prior_to_filtered(y)<EOL>self.filtered_to_forecast()<EOL>
|
Updates x_hat and Sigma given k x 1 ndarray y. The full
update, from one period to the next
Parameters
----------
y : np.ndarray
A k x 1 ndarray y representing the current measurement
|
f5121:c0:m9
|
def stationary_values(self, method='<STR_LIT>'):
|
<EOL>A, C, G, H = self.ss.A, self.ss.C, self.ss.G, self.ss.H<EOL>Q, R = np.dot(C, C.T), np.dot(H, H.T)<EOL>Sigma_infinity = solve_discrete_riccati(A.T, G.T, Q, R, method=method)<EOL>temp1 = dot(dot(A, Sigma_infinity), G.T)<EOL>temp2 = inv(dot(G, dot(Sigma_infinity, G.T)) + R)<EOL>K_infinity = dot(temp1, temp2)<EOL>self._Sigma_infinity, self._K_infinity = Sigma_infinity, K_infinity<EOL>return Sigma_infinity, K_infinity<EOL>
|
Computes the limit of :math:`\Sigma_t` as t goes to infinity by
solving the associated Riccati equation. The outputs are stored in the
attributes `K_infinity` and `Sigma_infinity`. Computation is via the
doubling algorithm (default) or a QZ decomposition method (see the
documentation in `matrix_eqn.solve_discrete_riccati`).
Parameters
----------
method : str, optional(default="doubling")
Solution method used in solving the associated Riccati
equation, str in {'doubling', 'qz'}.
Returns
-------
Sigma_infinity : array_like or scalar(float)
The infinite limit of :math:`\Sigma_t`
K_infinity : array_like or scalar(float)
The stationary Kalman gain.
|
f5121:c0:m10
|
def stationary_coefficients(self, j, coeff_type='<STR_LIT>'):
|
<EOL>A, G = self.ss.A, self.ss.G<EOL>K_infinity = self.K_infinity<EOL>coeffs = []<EOL>i = <NUM_LIT:1><EOL>if coeff_type == '<STR_LIT>':<EOL><INDENT>coeffs.append(np.identity(self.ss.k))<EOL>P_mat = A<EOL>P = np.identity(self.ss.n) <EOL><DEDENT>elif coeff_type == '<STR_LIT>':<EOL><INDENT>coeffs.append(dot(G, K_infinity))<EOL>P_mat = A - dot(K_infinity, G)<EOL>P = np.copy(P_mat) <EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>while i <= j:<EOL><INDENT>coeffs.append(dot(dot(G, P), K_infinity))<EOL>P = dot(P, P_mat)<EOL>i += <NUM_LIT:1><EOL><DEDENT>return coeffs<EOL>
|
Wold representation moving average or VAR coefficients for the
steady state Kalman filter.
Parameters
----------
j : int
The lag length
coeff_type : string, either 'ma' or 'var' (default='ma')
The type of coefficient sequence to compute. Either 'ma' for
moving average or 'var' for VAR.
|
f5121:c0:m11
|
def hamilton_filter(data, h, *args):
|
<EOL>y = np.asarray(data, float)<EOL>T = len(y)<EOL>if len(args) == <NUM_LIT:1>: <EOL><INDENT>p = args[<NUM_LIT:0>]<EOL>X = np.ones((T-p-h+<NUM_LIT:1>, p+<NUM_LIT:1>))<EOL>for j in range(<NUM_LIT:1>, p+<NUM_LIT:1>):<EOL><INDENT>X[:, j] = y[p-j:T-h-j+<NUM_LIT:1>:<NUM_LIT:1>]<EOL><DEDENT>b = np.linalg.solve(X.transpose()@X, X.transpose()@y[p+h-<NUM_LIT:1>:T])<EOL>trend = np.append(np.zeros(p+h-<NUM_LIT:1>)+np.nan, X@b)<EOL>cycle = y - trend<EOL><DEDENT>elif len(args) == <NUM_LIT:0>: <EOL><INDENT>cycle = np.append(np.zeros(h)+np.nan, y[h:T] - y[<NUM_LIT:0>:T-h])<EOL>trend = y - cycle<EOL><DEDENT>return cycle, trend<EOL>
|
r"""
This function applies "Hamilton filter" to the data
http://econweb.ucsd.edu/~jhamilto/hp.pdf
Parameters
----------
data : array or dataframe
h : integer
Time horizon that we are likely to predict incorrectly.
Original paper recommends 2 for annual data, 8 for quarterly data,
24 for monthly data.
*args : integer
If supplied, it is p in the paper. Number of lags in regression.
Must be greater than h.
If not supplied, random walk process is assumed.
Note: For seasonal data, it's desirable for p and h to be integer multiples
of the number of observations in a year.
e.g. For quarterly data, h = 8 and p = 4 are recommended.
Returns
-------
cycle : array of cyclical component
trend : trend component
|
f5122:m0
|
@generated_jit(nopython=True, cache=True)<EOL>def _numba_linalg_solve(a, b):
|
numba_xgesv = _LAPACK().numba_xgesv(a.dtype)<EOL>kind = ord(_blas_kinds[a.dtype])<EOL>def _numba_linalg_solve_impl(a, b): <EOL><INDENT>n = a.shape[-<NUM_LIT:1>]<EOL>if b.ndim == <NUM_LIT:1>:<EOL><INDENT>nrhs = <NUM_LIT:1><EOL><DEDENT>else: <EOL><INDENT>nrhs = b.shape[-<NUM_LIT:1>]<EOL><DEDENT>F_INT_nptype = np.int32<EOL>ipiv = np.empty(n, dtype=F_INT_nptype)<EOL>r = numba_xgesv(<EOL>kind, <EOL>n, <EOL>nrhs, <EOL>a.ctypes, <EOL>n, <EOL>ipiv.ctypes, <EOL>b.ctypes, <EOL>n <EOL>)<EOL>return r<EOL><DEDENT>return _numba_linalg_solve_impl<EOL>
|
Solve the linear equation ax = b directly calling a Numba internal
function. The data in `a` and `b` are interpreted in Fortran order,
and dtype of `a` and `b` must be the same, one of {float32, float64,
complex64, complex128}. `a` and `b` are modified in place, and the
solution is stored in `b`. *No error check is made for the inputs.*
Parameters
----------
a : ndarray(ndim=2)
2-dimensional ndarray of shape (n, n).
b : ndarray(ndim=1 or 2)
1-dimensional ndarray of shape (n,) or 2-dimensional ndarray of
shape (n, nrhs).
Returns
-------
r : scalar(int)
r = 0 if successful.
Notes
-----
From github.com/numba/numba/blob/master/numba/targets/linalg.py
|
f5123:m0
|
@jit(types.intp(types.intp, types.intp), nopython=True, cache=True)<EOL>def comb_jit(N, k):
|
<EOL>INTP_MAX = np.iinfo(np.intp).max<EOL>if N < <NUM_LIT:0> or k < <NUM_LIT:0> or k > N:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if k == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if k == <NUM_LIT:1>:<EOL><INDENT>return N<EOL><DEDENT>if N == INTP_MAX:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>M = N + <NUM_LIT:1><EOL>nterms = min(k, N - k)<EOL>val = <NUM_LIT:1><EOL>for j in range(<NUM_LIT:1>, nterms + <NUM_LIT:1>):<EOL><INDENT>if val > INTP_MAX // (M - j):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>val *= M - j<EOL>val //= j<EOL><DEDENT>return val<EOL>
|
Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int)
|
f5123:m1
|
@jit(nopython=True, cache=True)<EOL>def next_k_array(a):
|
<EOL>k = len(a)<EOL>if k == <NUM_LIT:1> or a[<NUM_LIT:0>] + <NUM_LIT:1> < a[<NUM_LIT:1>]:<EOL><INDENT>a[<NUM_LIT:0>] += <NUM_LIT:1><EOL>return a<EOL><DEDENT>a[<NUM_LIT:0>] = <NUM_LIT:0><EOL>i = <NUM_LIT:1><EOL>x = a[i] + <NUM_LIT:1><EOL>while i < k-<NUM_LIT:1> and x == a[i+<NUM_LIT:1>]:<EOL><INDENT>i += <NUM_LIT:1><EOL>a[i-<NUM_LIT:1>] = i - <NUM_LIT:1><EOL>x = a[i] + <NUM_LIT:1><EOL><DEDENT>a[i] = x<EOL>return a<EOL>
|
Given an array `a` of k distinct nonnegative integers, sorted in
ascending order, return the next k-array in the lexicographic
ordering of the descending sequences of the elements [1]_. `a` is
modified in place.
Parameters
----------
a : ndarray(int, ndim=1)
Array of length k.
Returns
-------
a : ndarray(int, ndim=1)
View of `a`.
Examples
--------
Enumerate all the subsets with k elements of the set {0, ..., n-1}.
>>> n, k = 4, 2
>>> a = np.arange(k)
>>> while a[-1] < n:
... print(a)
... a = next_k_array(a)
...
[0 1]
[0 2]
[1 2]
[0 3]
[1 3]
[2 3]
References
----------
.. [1] `Combinatorial number system
<https://en.wikipedia.org/wiki/Combinatorial_number_system>`_,
Wikipedia.
|
f5130:m0
|
def k_array_rank(a):
|
k = len(a)<EOL>idx = int(a[<NUM_LIT:0>]) <EOL>for i in range(<NUM_LIT:1>, k):<EOL><INDENT>idx += comb(a[i], i+<NUM_LIT:1>, exact=True)<EOL><DEDENT>return idx<EOL>
|
Given an array `a` of k distinct nonnegative integers, sorted in
ascending order, return its ranking in the lexicographic ordering of
the descending sequences of the elements [1]_.
Parameters
----------
a : ndarray(int, ndim=1)
Array of length k.
Returns
-------
idx : scalar(int)
Ranking of `a`.
References
----------
.. [1] `Combinatorial number system
<https://en.wikipedia.org/wiki/Combinatorial_number_system>`_,
Wikipedia.
|
f5130:m1
|
@jit(nopython=True, cache=True)<EOL>def k_array_rank_jit(a):
|
k = len(a)<EOL>idx = a[<NUM_LIT:0>]<EOL>for i in range(<NUM_LIT:1>, k):<EOL><INDENT>idx += comb_jit(a[i], i+<NUM_LIT:1>)<EOL><DEDENT>return idx<EOL>
|
Numba jit version of `k_array_rank`.
Notes
-----
An incorrect value will be returned without warning or error if
overflow occurs during the computation. It is the user's
responsibility to ensure that the rank of the input array fits
within the range of possible values of `np.intp`; a sufficient
condition for it is `scipy.special.comb(a[-1]+1, len(a), exact=True)
<= np.iinfo(np.intp).max`.
|
f5130:m2
|
def tic(self):
|
t = time.time()<EOL>self.start = t<EOL>self.last = t<EOL>
|
Save time for future use with `tac()` or `toc()`.
|
f5131:c0:m0
|
def tac(self, verbose=True, digits=<NUM_LIT:2>):
|
if self.start is None:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>t = time.time()<EOL>elapsed = t-self.last<EOL>self.last = t<EOL>if verbose:<EOL><INDENT>m, s = divmod(elapsed, <NUM_LIT>)<EOL>h, m = divmod(m, <NUM_LIT>)<EOL>print("<STR_LIT>" %<EOL>(h, m, s, digits, (s % <NUM_LIT:1>)*(<NUM_LIT:10>**digits)))<EOL><DEDENT>return elapsed<EOL>
|
Return and print elapsed time since last `tic()`, `tac()`, or
`toc()`.
Parameters
----------
verbose : bool, optional(default=True)
If True, then prints time.
digits : scalar(int), optional(default=2)
Number of digits printed for time elapsed.
Returns
-------
elapsed : scalar(float)
Time elapsed since last `tic()`, `tac()`, or `toc()`.
|
f5131:c0:m1
|
def toc(self, verbose=True, digits=<NUM_LIT:2>):
|
if self.start is None:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT>t = time.time()<EOL>self.last = t<EOL>elapsed = t-self.start<EOL>if verbose:<EOL><INDENT>m, s = divmod(elapsed, <NUM_LIT>)<EOL>h, m = divmod(m, <NUM_LIT>)<EOL>print("<STR_LIT>" %<EOL>(h, m, s, digits, (s % <NUM_LIT:1>)*(<NUM_LIT:10>**digits)))<EOL><DEDENT>return elapsed<EOL>
|
Return and print time elapsed since last `tic()`.
Parameters
----------
verbose : bool, optional(default=True)
If True, then prints time.
digits : scalar(int), optional(default=2)
Number of digits printed for time elapsed.
Returns
-------
elapsed : scalar(float)
Time elapsed since last `tic()`.
|
f5131:c0:m2
|
def loop_timer(self, n, function, args=None, verbose=True, digits=<NUM_LIT:2>,<EOL>best_of=<NUM_LIT:3>):
|
tic()<EOL>all_times = np.empty(n)<EOL>for run in range(n):<EOL><INDENT>if hasattr(args, '<STR_LIT>'):<EOL><INDENT>function(*args)<EOL><DEDENT>elif args is None:<EOL><INDENT>function()<EOL><DEDENT>else:<EOL><INDENT>function(args)<EOL><DEDENT>all_times[run] = tac(verbose=False, digits=digits)<EOL><DEDENT>elapsed = toc(verbose=False, digits=digits)<EOL>m, s = divmod(elapsed, <NUM_LIT>)<EOL>h, m = divmod(m, <NUM_LIT>)<EOL>print("<STR_LIT>" %<EOL>(h, m, s, digits, (s % <NUM_LIT:1>)*(<NUM_LIT:10>**digits)))<EOL>average_time = all_times.mean()<EOL>average_of_best = np.sort(all_times)[:best_of].mean()<EOL>if verbose:<EOL><INDENT>m, s = divmod(average_time, <NUM_LIT>)<EOL>h, m = divmod(m, <NUM_LIT>)<EOL>print("<STR_LIT>" %<EOL>(n, h, m, s, digits, (s % <NUM_LIT:1>)*(<NUM_LIT:10>**digits)))<EOL>m, s = divmod(average_of_best, <NUM_LIT>)<EOL>h, m = divmod(m, <NUM_LIT>)<EOL>print("<STR_LIT>" %<EOL>(best_of, h, m, s, digits, (s % <NUM_LIT:1>)*(<NUM_LIT:10>**digits)))<EOL><DEDENT>return average_time, average_of_best<EOL>
|
Return and print the total and average time elapsed for n runs
of function.
Parameters
----------
n : scalar(int)
Number of runs.
function : function
Function to be timed.
args : list, optional(default=None)
Arguments of the function.
verbose : bool, optional(default=True)
If True, then prints average time.
digits : scalar(int), optional(default=2)
Number of digits printed for time elapsed.
best_of : scalar(int), optional(default=3)
Average time over best_of runs.
Returns
-------
average_time : scalar(float)
Average time elapsed for n runs of function.
average_of_best : scalar(float)
Average of best_of times for n runs of function.
|
f5131:c0:m3
|
def fetch_nb_dependencies(files, repo=REPO, raw=RAW, branch=BRANCH, folder=FOLDER, overwrite=False, verbose=True):
|
import requests<EOL>if type(files) == list:<EOL><INDENT>files = {"<STR_LIT>" : files}<EOL><DEDENT>status = []<EOL>for directory in files.keys():<EOL><INDENT>if directory != "<STR_LIT>":<EOL><INDENT>if verbose: print("<STR_LIT>"%directory)<EOL><DEDENT>for fl in files[directory]:<EOL><INDENT>if directory != "<STR_LIT>":<EOL><INDENT>fl = directory+"<STR_LIT:/>"+fl<EOL><DEDENT>if not overwrite:<EOL><INDENT>if os.path.isfile(fl):<EOL><INDENT>if verbose: print(<EOL>"<STR_LIT>" % fl)<EOL>status.append(False)<EOL>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose: print("<STR_LIT>"%fl)<EOL><DEDENT>if verbose: print("<STR_LIT>"%fl)<EOL>url = "<STR_LIT:/>".join([repo, raw, branch, folder, fl])<EOL>r = requests.get(url)<EOL>with open(fl, "<STR_LIT:wb>") as fl:<EOL><INDENT>fl.write(r.content)<EOL><DEDENT>status.append(True)<EOL><DEDENT><DEDENT>return status<EOL>
|
Retrieve raw files from QuantEcon.notebooks or other Github repo
Parameters
----------
files list or dict
A list of files to specify a collection of filenames
A dict of dir : list(files) to specify a directory
repo str, optional(default=REPO)
raw str, optional(default=RAW)
This is here in case github changes access to their raw files through web links
branch str, optional(default=BRANCH)
folder str, optional(default=FOLDER)
overwrite bool, optional(default=False)
verbose bool, optional(default=True)
Examples
--------
Consider a notebook that is dependent on a ``csv`` file to execute. If this file is
located in a Github repository then it can be fetched using this utility
Assuming the file is at the root level in the ``master`` branch then:
>>> from quantecon.util import fetch_nb_dependencies
>>> status = fetch_nb_dependencies(["test.csv"], repo="https://<github_address>")
More than one file may be requested in the list provided
>>> status = fetch_nb_dependencies(["test.csv", "data.csv"], repo="https://<github_address>")
A folder location can be added using ``folder=``
>>> status = fetch_nb_dependencies("test.csv", repo="https://<github_address>", folder="data")
You can also specify a specific branch using ``branch=`` keyword argument.
This will download the requested file(s) to your local working directory. The default
behaviour is **not** to overwrite a local file if it is present. This can be switched off
by setting ``overwrite=True``.
|
f5133:m0
|
def check_random_state(seed):
|
if seed is None or seed is np.random:<EOL><INDENT>return np.random.mtrand._rand<EOL><DEDENT>if isinstance(seed, (numbers.Integral, np.integer)):<EOL><INDENT>return np.random.RandomState(seed)<EOL><DEDENT>if isinstance(seed, np.random.RandomState):<EOL><INDENT>return seed<EOL><DEDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % seed)<EOL>
|
Check the random state of a given seed.
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
.. Note
----
1. This code was sourced from scikit-learn
|
f5134:m0
|
@jit(nopython=True)<EOL>def searchsorted(a, v):
|
lo = -<NUM_LIT:1><EOL>hi = len(a)<EOL>while(lo < hi-<NUM_LIT:1>):<EOL><INDENT>m = (lo + hi) // <NUM_LIT:2><EOL>if v < a[m]:<EOL><INDENT>hi = m<EOL><DEDENT>else:<EOL><INDENT>lo = m<EOL><DEDENT><DEDENT>return hi<EOL>
|
Custom version of np.searchsorted. Return the largest index `i` such
that `a[i-1] <= v < a[i]` (for `i = 0`, `v < a[0]`); if `a[n-1] <=
v`, return `n`, where `n = len(a)`.
Parameters
----------
a : ndarray(float, ndim=1)
Input array. Must be sorted in ascending order.
v : scalar(float)
Value to be compared with elements of `a`.
Returns
-------
scalar(int)
Largest index `i` such that `a[i-1] <= v < a[i]`, or len(a) if
no such index exists.
Notes
-----
This routine is jit-compiled if the module Numba is available; if
not, it is an alias of np.searchsorted(a, v, side='right').
Examples
--------
>>> a = np.array([0.2, 0.4, 1.0])
>>> searchsorted(a, 0.1)
0
>>> searchsorted(a, 0.4)
2
>>> searchsorted(a, 2)
3
|
f5135:m0
|
def __call__(self, x):
|
return np.mean(self.observations <= x)<EOL>
|
Evaluates the ecdf at x
Parameters
----------
x : scalar(float)
The x at which the ecdf is evaluated
Returns
-------
scalar(float)
Fraction of the sample less than x
|
f5137:c0:m3
|
def rank_est(A, atol=<NUM_LIT>, rtol=<NUM_LIT:0>):
|
A = np.atleast_2d(A)<EOL>s = svd(A, compute_uv=False)<EOL>tol = max(atol, rtol * s[<NUM_LIT:0>])<EOL>rank = int((s >= tol).sum())<EOL>return rank<EOL>
|
Estimate the rank (i.e. the dimension of the nullspace) of a matrix.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : array_like(float, ndim=1 or 2)
A should be at most 2-D. A 1-D array with length n will be
treated as a 2-D with shape (1, n)
atol : scalar(float), optional(default=1e-13)
The absolute tolerance for a zero singular value. Singular
values smaller than `atol` are considered to be zero.
rtol : scalar(float), optional(default=0)
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
Returns
-------
r : scalar(int)
The estimated rank of the matrix.
Note: If both `atol` and `rtol` are positive, the combined tolerance
is the maximum of the two; that is:
tol = max(atol, rtol * smax)
Note: Singular values smaller than `tol` are considered to be zero.
See also
--------
numpy.linalg.matrix_rank
matrix_rank is basically the same as this function, but it does
not provide the option of the absolute tolerance.
|
f5138:m0
|
def nullspace(A, atol=<NUM_LIT>, rtol=<NUM_LIT:0>):
|
A = np.atleast_2d(A)<EOL>u, s, vh = svd(A)<EOL>tol = max(atol, rtol * s[<NUM_LIT:0>])<EOL>nnz = (s >= tol).sum()<EOL>ns = vh[nnz:].conj().T<EOL>return ns<EOL>
|
Compute an approximate basis for the nullspace of A.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : array_like(float, ndim=1 or 2)
A should be at most 2-D. A 1-D array with length k will be
treated as a 2-D with shape (1, k)
atol : scalar(float), optional(default=1e-13)
The absolute tolerance for a zero singular value. Singular
values smaller than `atol` are considered to be zero.
rtol : scalar(float), optional(default=0)
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
Returns
-------
ns : array_like(float, ndim=2)
If `A` is an array with shape (m, k), then `ns` will be an array
with shape (k, n), where n is the estimated dimension of the
nullspace of `A`. The columns of `ns` are a basis for the
nullspace; each element in numpy.dot(A, ns) will be
approximately zero.
Note: If both `atol` and `rtol` are positive, the combined tolerance
is the maximum of the two; that is:
tol = max(atol, rtol * smax)
Note: Singular values smaller than `tol` are considered to be zero.
|
f5138:m1
|
def smooth(x, window_len=<NUM_LIT:7>, window='<STR_LIT>'):
|
if len(x) < window_len:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if window_len < <NUM_LIT:3>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if not window_len % <NUM_LIT:2>: <EOL><INDENT>window_len += <NUM_LIT:1><EOL>print("<STR_LIT>".format(window_len))<EOL><DEDENT>windows = {'<STR_LIT>': np.hanning,<EOL>'<STR_LIT>': np.hamming,<EOL>'<STR_LIT>': np.bartlett,<EOL>'<STR_LIT>': np.blackman,<EOL>'<STR_LIT>': np.ones <EOL>}<EOL>k = int(window_len / <NUM_LIT:2>)<EOL>xb = x[:k] <EOL>xt = x[-k:] <EOL>s = np.concatenate((xb[::-<NUM_LIT:1>], x, xt[::-<NUM_LIT:1>]))<EOL>if window in windows.keys():<EOL><INDENT>w = windows[window](window_len)<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>".format(window)<EOL>print(msg + "<STR_LIT>")<EOL>w = windows['<STR_LIT>'](window_len)<EOL><DEDENT>return np.convolve(w / w.sum(), s, mode='<STR_LIT>')<EOL>
|
Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction.
|
f5139:m0
|
def periodogram(x, window=None, window_len=<NUM_LIT:7>):
|
n = len(x)<EOL>I_w = np.abs(fft(x))**<NUM_LIT:2> / n<EOL>w = <NUM_LIT:2> * np.pi * np.arange(n) / n <EOL>w, I_w = w[:int(n/<NUM_LIT:2>)+<NUM_LIT:1>], I_w[:int(n/<NUM_LIT:2>)+<NUM_LIT:1>] <EOL>if window:<EOL><INDENT>I_w = smooth(I_w, window_len=window_len, window=window)<EOL><DEDENT>return w, I_w<EOL>
|
r"""
Computes the periodogram
.. math::
I(w) = \frac{1}{n} \Big[ \sum_{t=0}^{n-1} x_t e^{itw} \Big] ^2
at the Fourier frequencies :math:`w_j := \frac{2 \pi j}{n}`,
:math:`j = 0, \dots, n - 1`, using the fast Fourier transform. Only the
frequencies :math:`w_j` in :math:`[0, \pi]` and corresponding values
:math:`I(w_j)` are returned. If a window type is given then smoothing
is performed.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional(default=7)
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
w : array_like(float)
Fourier frequencies at which periodogram is evaluated
I_w : array_like(float)
Values of periodogram at the Fourier frequencies
|
f5139:m1
|
def ar_periodogram(x, window='<STR_LIT>', window_len=<NUM_LIT:7>):
|
<EOL>x_lag = x[:-<NUM_LIT:1>] <EOL>X = np.array([np.ones(len(x_lag)), x_lag]).T <EOL>y = np.array(x[<NUM_LIT:1>:]) <EOL>beta_hat = np.linalg.solve(X.T @ X, X.T @ y) <EOL>e_hat = y - X @ beta_hat <EOL>phi = beta_hat[<NUM_LIT:1>] <EOL>w, I_w = periodogram(e_hat, window=window, window_len=window_len)<EOL>I_w = I_w / np.abs(<NUM_LIT:1> - phi * np.exp(<NUM_LIT> * w))**<NUM_LIT:2><EOL>return w, I_w<EOL>
|
Compute periodogram from data x, using prewhitening, smoothing and
recoloring. The data is fitted to an AR(1) model for prewhitening,
and the residuals are used to compute a first-pass periodogram with
smoothing. The fitted coefficients are then used for recoloring.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
w : array_like(float)
Fourier frequences at which periodogram is evaluated
I_w : array_like(float)
Values of periodogram at the Fourier frequences
|
f5139:m2
|
@jit<EOL>def simulate_linear_model(A, x0, v, ts_length):
|
A = np.asarray(A)<EOL>n = A.shape[<NUM_LIT:0>]<EOL>x = np.empty((n, ts_length))<EOL>x[:, <NUM_LIT:0>] = x0<EOL>for t in range(ts_length-<NUM_LIT:1>):<EOL><INDENT>for i in range(n):<EOL><INDENT>x[i, t+<NUM_LIT:1>] = v[i, t] <EOL>for j in range(n):<EOL><INDENT>x[i, t+<NUM_LIT:1>] += A[i, j] * x[j, t] <EOL><DEDENT><DEDENT><DEDENT>return x<EOL>
|
r"""
This is a separate function for simulating a vector linear system of
the form
.. math::
x_{t+1} = A x_t + v_t
given :math:`x_0` = x0
Here :math:`x_t` and :math:`v_t` are both n x 1 and :math:`A` is n x n.
The purpose of separating this functionality out is to target it for
optimization by Numba. For the same reason, matrix multiplication is
broken down into for loops.
Parameters
----------
A : array_like or scalar(float)
Should be n x n
x0 : array_like
Should be n x 1. Initial condition
v : np.ndarray
Should be n x ts_length-1. Its t-th column is used as the time t
shock :math:`v_t`
ts_length : int
The length of the time series
Returns
--------
x : np.ndarray
Time series with ts_length columns, the t-th column being :math:`x_t`
|
f5140:m0
|
def convert(self, x):
|
return np.atleast_2d(np.asarray(x, dtype='<STR_LIT:float>'))<EOL>
|
Convert array_like objects (lists of lists, floats, etc.) into
well formed 2D NumPy arrays
|
f5140:c0:m3
|
def simulate(self, ts_length=<NUM_LIT:100>, random_state=None):
|
random_state = check_random_state(random_state)<EOL>x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0)<EOL>w = random_state.randn(self.m, ts_length-<NUM_LIT:1>)<EOL>v = self.C.dot(w) <EOL>x = simulate_linear_model(self.A, x0, v, ts_length)<EOL>if self.H is not None:<EOL><INDENT>v = random_state.randn(self.l, ts_length)<EOL>y = self.G.dot(x) + self.H.dot(v)<EOL><DEDENT>else:<EOL><INDENT>y = self.G.dot(x)<EOL><DEDENT>return x, y<EOL>
|
r"""
Simulate a time series of length ts_length, first drawing
.. math::
x_0 \sim N(\mu_0, \Sigma_0)
Parameters
----------
ts_length : scalar(int), optional(default=100)
The length of the simulation
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
x : array_like(float)
An n x ts_length array, where the t-th column is :math:`x_t`
y : array_like(float)
A k x ts_length array, where the t-th column is :math:`y_t`
|
f5140:c0:m4
|
def replicate(self, T=<NUM_LIT:10>, num_reps=<NUM_LIT:100>, random_state=None):
|
random_state = check_random_state(random_state)<EOL>x = np.empty((self.n, num_reps))<EOL>for j in range(num_reps):<EOL><INDENT>x_T, _ = self.simulate(ts_length=T+<NUM_LIT:1>, random_state=random_state)<EOL>x[:, j] = x_T[:, -<NUM_LIT:1>]<EOL><DEDENT>if self.H is not None:<EOL><INDENT>v = random_state.randn(self.l, num_reps)<EOL>y = self.G.dot(x) + self.H.dot(v)<EOL><DEDENT>else:<EOL><INDENT>y = self.G.dot(x)<EOL><DEDENT>return x, y<EOL>
|
r"""
Simulate num_reps observations of :math:`x_T` and :math:`y_T` given
:math:`x_0 \sim N(\mu_0, \Sigma_0)`.
Parameters
----------
T : scalar(int), optional(default=10)
The period that we want to replicate values for
num_reps : scalar(int), optional(default=100)
The number of replications that we want
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
x : array_like(float)
An n x num_reps array, where the j-th column is the j_th
observation of :math:`x_T`
y : array_like(float)
A k x num_reps array, where the j-th column is the j_th
observation of :math:`y_T`
|
f5140:c0:m5
|
def moment_sequence(self):
|
<EOL>A, C, G, H = self.A, self.C, self.G, self.H<EOL>mu_x, Sigma_x = self.mu_0, self.Sigma_0<EOL>while <NUM_LIT:1>:<EOL><INDENT>mu_y = G.dot(mu_x)<EOL>if H is None:<EOL><INDENT>Sigma_y = G.dot(Sigma_x).dot(G.T)<EOL><DEDENT>else:<EOL><INDENT>Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)<EOL><DEDENT>yield mu_x, mu_y, Sigma_x, Sigma_y<EOL>mu_x = A.dot(mu_x)<EOL>Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T)<EOL><DEDENT>
|
r"""
Create a generator to calculate the population mean and
variance-convariance matrix for both :math:`x_t` and :math:`y_t`
starting at the initial condition (self.mu_0, self.Sigma_0).
Each iteration produces a 4-tuple of items (mu_x, mu_y, Sigma_x,
Sigma_y) for the next period.
Yields
------
mu_x : array_like(float)
An n x 1 array representing the population mean of x_t
mu_y : array_like(float)
A k x 1 array representing the population mean of y_t
Sigma_x : array_like(float)
An n x n array representing the variance-covariance matrix
of x_t
Sigma_y : array_like(float)
A k x k array representing the variance-covariance matrix
of y_t
|
f5140:c0:m6
|
def stationary_distributions(self, max_iter=<NUM_LIT:200>, tol=<NUM_LIT>):
|
<EOL>m = self.moment_sequence()<EOL>mu_x, mu_y, Sigma_x, Sigma_y = next(m)<EOL>i = <NUM_LIT:0><EOL>error = tol + <NUM_LIT:1><EOL>while error > tol:<EOL><INDENT>if i > max_iter:<EOL><INDENT>fail_message = '<STR_LIT>'<EOL>raise ValueError(fail_message.format(max_iter))<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL>mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)<EOL>error_mu = np.max(np.abs(mu_x1 - mu_x))<EOL>error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))<EOL>error = max(error_mu, error_Sigma)<EOL>mu_x, Sigma_x = mu_x1, Sigma_x1<EOL><DEDENT><DEDENT>mu_x_star, Sigma_x_star = mu_x, Sigma_x<EOL>mu_y_star, Sigma_y_star = mu_y1, Sigma_y1<EOL>return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star<EOL>
|
r"""
Compute the moments of the stationary distributions of :math:`x_t` and
:math:`y_t` if possible. Computation is by iteration, starting from
the initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of :math:`x_t`
mu_y_star : array_like(float)
An k x 1 array representing the stationary mean of :math:`y_t`
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of :math:`x_t`
Sigma_y_star : array_like(float)
An k x k array representing the stationary var-cov matrix
of :math:`y_t`
|
f5140:c0:m7
|
def geometric_sums(self, beta, x_t):
|
I = np.identity(self.n)<EOL>S_x = solve(I - beta * self.A, x_t)<EOL>S_y = self.G.dot(S_x)<EOL>return S_x, S_y<EOL>
|
r"""
Forecast the geometric sums
.. math::
S_x := E \Big[ \sum_{j=0}^{\infty} \beta^j x_{t+j} | x_t \Big]
S_y := E \Big[ \sum_{j=0}^{\infty} \beta^j y_{t+j} | x_t \Big]
Parameters
----------
beta : scalar(float)
Discount factor, in [0, 1)
beta : array_like(float)
The term x_t for conditioning
Returns
-------
S_x : array_like(float)
Geometric sum as defined above
S_y : array_like(float)
Geometric sum as defined above
|
f5140:c0:m8
|
def impulse_response(self, j=<NUM_LIT:5>):
|
<EOL>A, C, G, H = self.A, self.C, self.G, self.H<EOL>Apower = np.copy(A)<EOL>xcoef = [C]<EOL>ycoef = [np.dot(G, C)]<EOL>for i in range(j):<EOL><INDENT>xcoef.append(np.dot(Apower, C))<EOL>ycoef.append(np.dot(G, np.dot(Apower, C)))<EOL>Apower = np.dot(Apower, A)<EOL><DEDENT>return xcoef, ycoef<EOL>
|
r"""
Pulls off the imuplse response coefficients to a shock
in :math:`w_{t}` for :math:`x` and :math:`y`
Important to note: We are uninterested in the shocks to
v for this method
* :math:`x` coefficients are :math:`C, AC, A^2 C...`
* :math:`y` coefficients are :math:`GC, GAC, GA^2C...`
Parameters
----------
j : Scalar(int)
Number of coefficients that we want
Returns
-------
xcoef : list(array_like(float, 2))
The coefficients for x
ycoef : list(array_like(float, 2))
The coefficients for y
|
f5140:c0:m9
|
def ckron(*arrays):
|
return reduce(np.kron, arrays)<EOL>
|
Repeatedly applies the np.kron function to an arbitrary number of
input arrays
Parameters
----------
*arrays : tuple/list of np.ndarray
Returns
-------
out : np.ndarray
The result of repeated kronecker products.
Notes
-----
Based of original function `ckron` in CompEcon toolbox by Miranda
and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
|
f5141:m0
|
def gridmake(*arrays):
|
if all([i.ndim == <NUM_LIT:1> for i in arrays]):<EOL><INDENT>d = len(arrays)<EOL>if d == <NUM_LIT:2>:<EOL><INDENT>out = _gridmake2(*arrays)<EOL><DEDENT>else:<EOL><INDENT>out = _gridmake2(arrays[<NUM_LIT:0>], arrays[<NUM_LIT:1>])<EOL>for arr in arrays[<NUM_LIT:2>:]:<EOL><INDENT>out = _gridmake2(out, arr)<EOL><DEDENT><DEDENT>return out<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError("<STR_LIT>")<EOL><DEDENT>
|
Expands one or more vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
*arrays : tuple/list of np.ndarray
Tuple/list of vectors to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based of original function ``gridmake`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
|
f5141:m1
|
def _gridmake2(x1, x2):
|
if x1.ndim == <NUM_LIT:1> and x2.ndim == <NUM_LIT:1>:<EOL><INDENT>return np.column_stack([np.tile(x1, x2.shape[<NUM_LIT:0>]),<EOL>np.repeat(x2, x1.shape[<NUM_LIT:0>])])<EOL><DEDENT>elif x1.ndim > <NUM_LIT:1> and x2.ndim == <NUM_LIT:1>:<EOL><INDENT>first = np.tile(x1, (x2.shape[<NUM_LIT:0>], <NUM_LIT:1>))<EOL>second = np.repeat(x2, x1.shape[<NUM_LIT:0>])<EOL>return np.column_stack([first, second])<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError("<STR_LIT>")<EOL><DEDENT>
|
Expands two vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
x1 : np.ndarray
First vector to be expanded.
x2 : np.ndarray
Second vector to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based of original function ``gridmake2`` in CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
|
f5141:m2
|
def _csr_matrix_indices(S):
|
m, n = S.shape<EOL>for i in range(m):<EOL><INDENT>for j in range(S.indptr[i], S.indptr[i+<NUM_LIT:1>]):<EOL><INDENT>row_index, col_index = i, S.indices[j]<EOL>yield row_index, col_index<EOL><DEDENT><DEDENT>
|
Generate the indices of nonzero entries of a csr_matrix S
|
f5142:m1
|
def random_tournament_graph(n, random_state=None):
|
random_state = check_random_state(random_state)<EOL>num_edges = n * (n-<NUM_LIT:1>) // <NUM_LIT:2><EOL>r = random_state.random_sample(num_edges)<EOL>row = np.empty(num_edges, dtype=int)<EOL>col = np.empty(num_edges, dtype=int)<EOL>_populate_random_tournament_row_col(n, r, row, col)<EOL>data = np.ones(num_edges, dtype=bool)<EOL>adj_matrix = sparse.coo_matrix((data, (row, col)), shape=(n, n))<EOL>return DiGraph(adj_matrix)<EOL>
|
Return a random tournament graph [1]_ with n nodes.
Parameters
----------
n : scalar(int)
Number of nodes.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
DiGraph
A DiGraph representing the tournament graph.
References
----------
.. [1] `Tournament (graph theory)
<https://en.wikipedia.org/wiki/Tournament_(graph_theory)>`_,
Wikipedia.
|
f5142:m2
|
@jit(nopython=True, cache=True)<EOL>def _populate_random_tournament_row_col(n, r, row, col):
|
k = <NUM_LIT:0><EOL>for i in range(n):<EOL><INDENT>for j in range(i+<NUM_LIT:1>, n):<EOL><INDENT>if r[k] < <NUM_LIT:0.5>:<EOL><INDENT>row[k], col[k] = i, j<EOL><DEDENT>else:<EOL><INDENT>row[k], col[k] = j, i<EOL><DEDENT>k += <NUM_LIT:1><EOL><DEDENT><DEDENT>
|
Populate ndarrays `row` and `col` with directed edge indices
determined by random numbers in `r` for a tournament graph with n
nodes, which has num_edges = n * (n-1) // 2 edges.
Parameters
----------
n : scalar(int)
Number of nodes.
r : ndarray(float, ndim=1)
ndarray of length num_edges containing random numbers in [0, 1).
row, col : ndarray(int, ndim=1)
ndarrays of length num_edges to be modified in place.
|
f5142:m3
|
def _find_scc(self):
|
<EOL>self._num_scc, self._scc_proj =csgraph.connected_components(self.csgraph, connection='<STR_LIT>')<EOL>
|
Set ``self._num_scc`` and ``self._scc_proj``
by calling ``scipy.sparse.csgraph.connected_components``:
* docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
* github.com/scipy/scipy/blob/master/scipy/sparse/csgraph/_traversal.pyx
``self._scc_proj`` is a list of length `n` that assigns to each node
the label of the strongly connected component to which it belongs.
|
f5142:c0:m5
|
def _condensation_lil(self):
|
condensation_lil = sparse.lil_matrix(<EOL>(self.num_strongly_connected_components,<EOL>self.num_strongly_connected_components), dtype=bool<EOL>)<EOL>scc_proj = self.scc_proj<EOL>for node_from, node_to in _csr_matrix_indices(self.csgraph):<EOL><INDENT>scc_from, scc_to = scc_proj[node_from], scc_proj[node_to]<EOL>if scc_from != scc_to:<EOL><INDENT>condensation_lil[scc_from, scc_to] = True<EOL><DEDENT><DEDENT>return condensation_lil<EOL>
|
Return the sparse matrix representation of the condensation digraph
in lil format.
|
f5142:c0:m9
|
def _find_sink_scc(self):
|
condensation_lil = self._condensation_lil()<EOL>self._sink_scc_labels =np.where(np.logical_not(condensation_lil.rows))[<NUM_LIT:0>]<EOL>
|
Set self._sink_scc_labels, which is a list containing the labels of
the strongly connected components.
|
f5142:c0:m10
|
def _compute_period(self):
|
<EOL>if self.n == <NUM_LIT:1>:<EOL><INDENT>if self.csgraph[<NUM_LIT:0>, <NUM_LIT:0>] == <NUM_LIT:0>: <EOL><INDENT>self._period = <NUM_LIT:1> <EOL>self._cyclic_components_proj = np.zeros(self.n, dtype=int)<EOL>return None<EOL><DEDENT>else: <EOL><INDENT>self._period = <NUM_LIT:1><EOL>self._cyclic_components_proj = np.zeros(self.n, dtype=int)<EOL>return None<EOL><DEDENT><DEDENT>if not self.is_strongly_connected:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if np.any(self.csgraph.diagonal() > <NUM_LIT:0>):<EOL><INDENT>self._period = <NUM_LIT:1><EOL>self._cyclic_components_proj = np.zeros(self.n, dtype=int)<EOL>return None<EOL><DEDENT>node_order, predecessors =csgraph.breadth_first_order(self.csgraph, i_start=<NUM_LIT:0>)<EOL>bfs_tree_csr =csgraph.reconstruct_path(self.csgraph, predecessors)<EOL>non_bfs_tree_csr = self.csgraph - bfs_tree_csr<EOL>non_bfs_tree_csr.eliminate_zeros()<EOL>level = np.zeros(self.n, dtype=int)<EOL>for i in range(<NUM_LIT:1>, self.n):<EOL><INDENT>level[node_order[i]] = level[predecessors[node_order[i]]] + <NUM_LIT:1><EOL><DEDENT>d = <NUM_LIT:0><EOL>for node_from, node_to in _csr_matrix_indices(non_bfs_tree_csr):<EOL><INDENT>value = level[node_from] - level[node_to] + <NUM_LIT:1><EOL>d = gcd(d, value)<EOL>if d == <NUM_LIT:1>:<EOL><INDENT>self._period = <NUM_LIT:1><EOL>self._cyclic_components_proj = np.zeros(self.n, dtype=int)<EOL>return None<EOL><DEDENT><DEDENT>self._period = d<EOL>self._cyclic_components_proj = level % d<EOL>
|
Set ``self._period`` and ``self._cyclic_components_proj``.
Use the algorithm described in:
J. P. Jarvis and D. R. Shier,
"Graph-Theoretic Analysis of Finite Markov Chains," 1996.
|
f5142:c0:m17
|
def subgraph(self, nodes):
|
adj_matrix = self.csgraph[np.ix_(nodes, nodes)]<EOL>weighted = True <EOL>if self.node_labels is not None:<EOL><INDENT>node_labels = self.node_labels[nodes]<EOL><DEDENT>else:<EOL><INDENT>node_labels = None<EOL><DEDENT>return DiGraph(adj_matrix, weighted=weighted, node_labels=node_labels)<EOL>
|
Return the subgraph consisting of the given nodes and edges
between thses nodes.
Parameters
----------
nodes : array_like(int, ndim=1)
Array of node indices.
Returns
-------
DiGraph
A DiGraph representing the subgraph.
|
f5142:c0:m22
|
def compute_fixed_point(T, v, error_tol=<NUM_LIT>, max_iter=<NUM_LIT:50>, verbose=<NUM_LIT:2>,<EOL>print_skip=<NUM_LIT:5>, method='<STR_LIT>', *args, **kwargs):
|
if max_iter < <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if verbose not in (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if method not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if method == '<STR_LIT>':<EOL><INDENT>is_approx_fp =lambda v: _is_approx_fp(T, v, error_tol, *args, **kwargs)<EOL>v_star, converged, iterate =_compute_fixed_point_ig(T, v, max_iter, verbose, print_skip,<EOL>is_approx_fp, *args, **kwargs)<EOL>return v_star<EOL><DEDENT>iterate = <NUM_LIT:0><EOL>if verbose == <NUM_LIT:2>:<EOL><INDENT>start_time = time.time()<EOL>_print_after_skip(print_skip, it=None)<EOL><DEDENT>while True:<EOL><INDENT>new_v = T(v, *args, **kwargs)<EOL>iterate += <NUM_LIT:1><EOL>error = np.max(np.abs(new_v - v))<EOL>try:<EOL><INDENT>v[:] = new_v<EOL><DEDENT>except TypeError:<EOL><INDENT>v = new_v<EOL><DEDENT>if error <= error_tol or iterate >= max_iter:<EOL><INDENT>break<EOL><DEDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>etime = time.time() - start_time<EOL>_print_after_skip(print_skip, iterate, error, etime)<EOL><DEDENT><DEDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>etime = time.time() - start_time<EOL>print_skip = <NUM_LIT:1><EOL>_print_after_skip(print_skip, iterate, error, etime)<EOL><DEDENT>if verbose >= <NUM_LIT:1>:<EOL><INDENT>if error > error_tol:<EOL><INDENT>warnings.warn(_non_convergence_msg, RuntimeWarning)<EOL><DEDENT>elif verbose == <NUM_LIT:2>:<EOL><INDENT>print(_convergence_msg.format(iterate=iterate))<EOL><DEDENT><DEDENT>return v<EOL>
|
r"""
Computes and returns an approximate fixed point of the function `T`.
The default method `'iteration'` simply iterates the function given
an initial condition `v` and returns :math:`T^k v` when the
condition :math:`\lVert T^k v - T^{k-1} v\rVert \leq
\mathrm{error\_tol}` is satisfied or the number of iterations
:math:`k` reaches `max_iter`. Provided that `T` is a contraction
mapping or similar, :math:`T^k v` will be an approximation to the
fixed point.
The method `'imitation_game'` uses the "imitation game algorithm"
developed by McLennan and Tourky [1]_, which internally constructs
a sequence of two-player games called imitation games and utilizes
their Nash equilibria, computed by the Lemke-Howson algorithm
routine. It finds an approximate fixed point of `T`, a point
:math:`v^*` such that :math:`\lVert T(v) - v\rVert \leq
\mathrm{error\_tol}`, provided `T` is a function that satisfies the
assumptions of Brouwer's fixed point theorm, i.e., a continuous
function that maps a compact and convex set to itself.
Parameters
----------
T : callable
A callable object (e.g., function) that acts on v
v : object
An object such that T(v) is defined; modified in place if
`method='iteration' and `v` is an array
error_tol : scalar(float), optional(default=1e-3)
Error tolerance
max_iter : scalar(int), optional(default=50)
Maximum number of iterations
verbose : scalar(int), optional(default=2)
Level of feedback (0 for no output, 1 for warnings only, 2 for
warning and residual error reports during iteration)
print_skip : scalar(int), optional(default=5)
How many iterations to apply between print messages (effective
only when `verbose=2`)
method : str, optional(default='iteration')
str in {'iteration', 'imitation_game'}. Method of computing
an approximate fixed point
args, kwargs :
Other arguments and keyword arguments that are passed directly
to the function T each time it is called
Returns
-------
v : object
The approximate fixed point
References
----------
.. [1] A. McLennan and R. Tourky, "From Imitation Games to
Kakutani," 2006.
|
f5143:m2
|
def _compute_fixed_point_ig(T, v, max_iter, verbose, print_skip, is_approx_fp,<EOL>*args, **kwargs):
|
if verbose == <NUM_LIT:2>:<EOL><INDENT>start_time = time.time()<EOL>_print_after_skip(print_skip, it=None)<EOL><DEDENT>x_new = v<EOL>y_new = T(x_new, *args, **kwargs)<EOL>iterate = <NUM_LIT:1><EOL>converged = is_approx_fp(x_new)<EOL>if converged or iterate >= max_iter:<EOL><INDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>error = np.max(np.abs(y_new - x_new))<EOL>etime = time.time() - start_time<EOL>print_skip = <NUM_LIT:1><EOL>_print_after_skip(print_skip, iterate, error, etime)<EOL><DEDENT>if verbose >= <NUM_LIT:1>:<EOL><INDENT>if not converged:<EOL><INDENT>warnings.warn(_non_convergence_msg, RuntimeWarning)<EOL><DEDENT>elif verbose == <NUM_LIT:2>:<EOL><INDENT>print(_convergence_msg.format(iterate=iterate))<EOL><DEDENT><DEDENT>return x_new, converged, iterate<EOL><DEDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>error = np.max(np.abs(y_new - x_new))<EOL>etime = time.time() - start_time<EOL>_print_after_skip(print_skip, iterate, error, etime)<EOL><DEDENT>buff_size = <NUM_LIT:2>**<NUM_LIT:8><EOL>buff_size = min(max_iter, buff_size)<EOL>shape = (buff_size,) + np.asarray(x_new).shape<EOL>X, Y = np.empty(shape), np.empty(shape)<EOL>X[<NUM_LIT:0>], Y[<NUM_LIT:0>] = x_new, y_new<EOL>x_new = Y[<NUM_LIT:0>]<EOL>tableaux = tuple(np.empty((buff_size, buff_size*<NUM_LIT:2>+<NUM_LIT:1>)) for i in range(<NUM_LIT:2>))<EOL>bases = tuple(np.empty(buff_size, dtype=int) for i in range(<NUM_LIT:2>))<EOL>max_piv = <NUM_LIT:10>**<NUM_LIT:6> <EOL>while True:<EOL><INDENT>y_new = T(x_new, *args, **kwargs)<EOL>iterate += <NUM_LIT:1><EOL>converged = is_approx_fp(x_new)<EOL>if converged or iterate >= max_iter:<EOL><INDENT>break<EOL><DEDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>error = np.max(np.abs(y_new - x_new))<EOL>etime = time.time() - start_time<EOL>_print_after_skip(print_skip, iterate, error, etime)<EOL><DEDENT>try:<EOL><INDENT>X[iterate-<NUM_LIT:1>] = x_new<EOL>Y[iterate-<NUM_LIT:1>] = y_new<EOL><DEDENT>except IndexError:<EOL><INDENT>buff_size = min(max_iter, 
buff_size*<NUM_LIT:2>)<EOL>shape = (buff_size,) + X.shape[<NUM_LIT:1>:]<EOL>X_tmp, Y_tmp = X, Y<EOL>X, Y = np.empty(shape), np.empty(shape)<EOL>X[:X_tmp.shape[<NUM_LIT:0>]], Y[:Y_tmp.shape[<NUM_LIT:0>]] = X_tmp, Y_tmp<EOL>X[iterate-<NUM_LIT:1>], Y[iterate-<NUM_LIT:1>] = x_new, y_new<EOL>tableaux = tuple(np.empty((buff_size, buff_size*<NUM_LIT:2>+<NUM_LIT:1>))<EOL>for i in range(<NUM_LIT:2>))<EOL>bases = tuple(np.empty(buff_size, dtype=int) for i in range(<NUM_LIT:2>))<EOL><DEDENT>m = iterate<EOL>tableaux_curr = tuple(tableau[:m, :<NUM_LIT:2>*m+<NUM_LIT:1>] for tableau in tableaux)<EOL>bases_curr = tuple(basis[:m] for basis in bases)<EOL>_initialize_tableaux_ig(X[:m], Y[:m], tableaux_curr, bases_curr)<EOL>converged, num_iter = _lemke_howson_tbl(<EOL>tableaux_curr, bases_curr, init_pivot=m-<NUM_LIT:1>, max_iter=max_piv<EOL>)<EOL>_, rho = _get_mixed_actions(tableaux_curr, bases_curr)<EOL>if Y.ndim <= <NUM_LIT:2>:<EOL><INDENT>x_new = rho.dot(Y[:m])<EOL><DEDENT>else:<EOL><INDENT>shape_Y = Y.shape<EOL>Y_2d = Y.reshape(shape_Y[<NUM_LIT:0>], np.prod(shape_Y[<NUM_LIT:1>:]))<EOL>x_new = rho.dot(Y_2d[:m]).reshape(shape_Y[<NUM_LIT:1>:])<EOL><DEDENT><DEDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>error = np.max(np.abs(y_new - x_new))<EOL>etime = time.time() - start_time<EOL>print_skip = <NUM_LIT:1><EOL>_print_after_skip(print_skip, iterate, error, etime)<EOL><DEDENT>if verbose >= <NUM_LIT:1>:<EOL><INDENT>if not converged:<EOL><INDENT>warnings.warn(_non_convergence_msg, RuntimeWarning)<EOL><DEDENT>elif verbose == <NUM_LIT:2>:<EOL><INDENT>print(_convergence_msg.format(iterate=iterate))<EOL><DEDENT><DEDENT>return x_new, converged, iterate<EOL>
|
Implement the imitation game algorithm by McLennan and Tourky (2006)
for computing an approximate fixed point of `T`.
Parameters
----------
is_approx_fp : callable
A callable with signature `is_approx_fp(v)` which determines
whether `v` is an approximate fixed point with a bool return
value (i.e., True or False)
For the other parameters, see Parameters in compute_fixed_point.
Returns
-------
x_new : scalar(float) or ndarray(float)
Approximate fixed point.
converged : bool
Whether the routine has converged.
iterate : scalar(int)
Number of iterations.
|
f5143:m3
|
@jit(nopython=True)<EOL>def _initialize_tableaux_ig(X, Y, tableaux, bases):
|
m = X.shape[<NUM_LIT:0>]<EOL>min_ = np.zeros(m)<EOL>for i in range(m):<EOL><INDENT>for j in range(<NUM_LIT:2>*m):<EOL><INDENT>if j == i or j == i + m:<EOL><INDENT>tableaux[<NUM_LIT:0>][i, j] = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>tableaux[<NUM_LIT:0>][i, j] = <NUM_LIT:0><EOL><DEDENT><DEDENT>tableaux[<NUM_LIT:0>][i, <NUM_LIT:2>*m] = <NUM_LIT:1><EOL><DEDENT>for i in range(m):<EOL><INDENT>for j in range(m):<EOL><INDENT>if j == i:<EOL><INDENT>tableaux[<NUM_LIT:1>][i, j] = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>tableaux[<NUM_LIT:1>][i, j] = <NUM_LIT:0><EOL><DEDENT><DEDENT>for j in range(m):<EOL><INDENT>d = X[i] - Y[j]<EOL>tableaux[<NUM_LIT:1>][i, m+j] = _square_sum(d) * (-<NUM_LIT:1>)<EOL>if tableaux[<NUM_LIT:1>][i, m+j] < min_[j]:<EOL><INDENT>min_[j] = tableaux[<NUM_LIT:1>][i, m+j]<EOL><DEDENT><DEDENT>tableaux[<NUM_LIT:1>][i, <NUM_LIT:2>*m] = <NUM_LIT:1><EOL><DEDENT>for i in range(m):<EOL><INDENT>for j in range(m):<EOL><INDENT>tableaux[<NUM_LIT:1>][i, m+j] -= min_[j]<EOL>tableaux[<NUM_LIT:1>][i, m+j] += <NUM_LIT:1><EOL><DEDENT><DEDENT>for pl, start in enumerate([m, <NUM_LIT:0>]):<EOL><INDENT>for i in range(m):<EOL><INDENT>bases[pl][i] = start + i<EOL><DEDENT><DEDENT>return tableaux, bases<EOL>
|
Given sequences `X` and `Y` of ndarrays, initialize the tableau and
basis arrays in place for the "geometric" imitation game as defined
in McLennan and Tourky (2006), to be passed to `_lemke_howson_tbl`.
Parameters
----------
X, Y : ndarray(float)
Arrays of the same shape (m, n).
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays to be used to store the tableaux, of shape
(2m, 2m). Modified in place.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays to be used to store the bases, of shape
(m,). Modified in place.
Returns
-------
tableaux : tuple(ndarray(float, ndim=2))
View to `tableaux`.
bases : tuple(ndarray(int, ndim=1))
View to `bases`.
|
f5143:m4
|
def d_operator(self, P):
|
C, theta = self.C, self.theta<EOL>I = np.identity(self.j)<EOL>S1 = dot(P, C)<EOL>S2 = dot(C.T, S1)<EOL>dP = P + dot(S1, solve(theta * I - S2, S1.T))<EOL>return dP<EOL>
|
r"""
The D operator, mapping P into
.. math::
D(P) := P + PC(\theta I - C'PC)^{-1} C'P.
Parameters
----------
P : array_like(float, ndim=2)
A matrix that should be n x n
Returns
-------
dP : array_like(float, ndim=2)
The matrix P after applying the D operator
|
f5144:c0:m3
|
def b_operator(self, P):
|
A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta<EOL>S1 = Q + beta * dot(B.T, dot(P, B))<EOL>S2 = beta * dot(B.T, dot(P, A))<EOL>S3 = beta * dot(A.T, dot(P, A))<EOL>F = solve(S1, S2) if not self.pure_forecasting else np.zeros(<EOL>(self.k, self.n))<EOL>new_P = R - dot(S2.T, F) + S3<EOL>return F, new_P<EOL>
|
r"""
The B operator, mapping P into
.. math::
B(P) := R - \beta^2 A'PB(Q + \beta B'PB)^{-1}B'PA + \beta A'PA
and also returning
.. math::
F := (Q + \beta B'PB)^{-1} \beta B'PA
Parameters
----------
P : array_like(float, ndim=2)
A matrix that should be n x n
Returns
-------
F : array_like(float, ndim=2)
The F matrix as defined above
new_p : array_like(float, ndim=2)
The matrix P after applying the B operator
|
f5144:c0:m4
|
def robust_rule(self, method='<STR_LIT>'):
|
<EOL>A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R<EOL>beta, theta = self.beta, self.theta<EOL>k, j = self.k, self.j<EOL>I = identity(j)<EOL>Z = np.zeros((k, j))<EOL>if self.pure_forecasting:<EOL><INDENT>lq = LQ(-beta*I*theta, R, A, C, beta=beta)<EOL>P, f, d = lq.stationary_values(method=method)<EOL>F = np.zeros((self.k, self.n))<EOL>K = -f[:k, :]<EOL><DEDENT>else:<EOL><INDENT>Ba = hstack([B, C])<EOL>Qa = vstack([hstack([Q, Z]), hstack([Z.T, -beta*I*theta])])<EOL>lq = LQ(Qa, R, A, Ba, beta=beta)<EOL>P, f, d = lq.stationary_values(method=method)<EOL>F = f[:k, :]<EOL>K = -f[k:f.shape[<NUM_LIT:0>], :]<EOL><DEDENT>return F, K, P<EOL>
|
This method solves the robust control problem by tricking it
into a stacked LQ problem, as described in chapter 2 of Hansen-
Sargent's text "Robustness." The optimal control with observed
state is
.. math::
u_t = - F x_t
And the value function is :math:`-x'Px`
Parameters
----------
method : str, optional(default='doubling')
Solution method used in solving the associated Riccati
equation, str in {'doubling', 'qz'}.
Returns
-------
F : array_like(float, ndim=2)
The optimal control matrix from above
P : array_like(float, ndim=2)
The positive semi-definite matrix defining the value
function
K : array_like(float, ndim=2)
the worst-case shock matrix K, where
:math:`w_{t+1} = K x_t` is the worst case shock
|
f5144:c0:m5
|
def robust_rule_simple(self, P_init=None, max_iter=<NUM_LIT>, tol=<NUM_LIT>):
|
<EOL>A, B, C, Q, R = self.A, self.B, self.C, self.Q, self.R<EOL>beta, theta = self.beta, self.theta<EOL>P = np.zeros((self.n, self.n)) if P_init is None else P_init<EOL>iterate, e = <NUM_LIT:0>, tol + <NUM_LIT:1><EOL>while iterate < max_iter and e > tol:<EOL><INDENT>F, new_P = self.b_operator(self.d_operator(P))<EOL>e = np.sqrt(np.sum((new_P - P)**<NUM_LIT:2>))<EOL>iterate += <NUM_LIT:1><EOL>P = new_P<EOL><DEDENT>I = np.identity(self.j)<EOL>S1 = P.dot(C)<EOL>S2 = C.T.dot(S1)<EOL>K = inv(theta * I - S2).dot(S1.T).dot(A - B.dot(F))<EOL>return F, K, P<EOL>
|
A simple algorithm for computing the robust policy F and the
corresponding value function P, based around straightforward
iteration with the robust Bellman operator. This function is
easier to understand but one or two orders of magnitude slower
than self.robust_rule(). For more information see the docstring
of that method.
Parameters
----------
P_init : array_like(float, ndim=2), optional(default=None)
The initial guess for the value function matrix. It will
be a matrix of zeros if no guess is given
max_iter : scalar(int), optional(default=80)
The maximum number of iterations that are allowed
tol : scalar(float), optional(default=1e-8)
The tolerance for convergence
Returns
-------
F : array_like(float, ndim=2)
The optimal control matrix from above
P : array_like(float, ndim=2)
The positive semi-definite matrix defining the value
function
K : array_like(float, ndim=2)
the worst-case shock matrix K, where
:math:`w_{t+1} = K x_t` is the worst case shock
|
f5144:c0:m6
|
def F_to_K(self, F, method='<STR_LIT>'):
|
Q2 = self.beta * self.theta<EOL>R2 = - self.R - dot(F.T, dot(self.Q, F))<EOL>A2 = self.A - dot(self.B, F)<EOL>B2 = self.C<EOL>lq = LQ(Q2, R2, A2, B2, beta=self.beta)<EOL>neg_P, neg_K, d = lq.stationary_values(method=method)<EOL>return -neg_K, -neg_P<EOL>
|
Compute agent 2's best cost-minimizing response K, given F.
Parameters
----------
F : array_like(float, ndim=2)
A k x n array
method : str, optional(default='doubling')
Solution method used in solving the associated Riccati
equation, str in {'doubling', 'qz'}.
Returns
-------
K : array_like(float, ndim=2)
Agent's best cost minimizing response for a given F
P : array_like(float, ndim=2)
The value function for a given F
|
f5144:c0:m7
|
def K_to_F(self, K, method='<STR_LIT>'):
|
A1 = self.A + dot(self.C, K)<EOL>B1 = self.B<EOL>Q1 = self.Q<EOL>R1 = self.R - self.beta * self.theta * dot(K.T, K)<EOL>lq = LQ(Q1, R1, A1, B1, beta=self.beta)<EOL>P, F, d = lq.stationary_values(method=method)<EOL>return F, P<EOL>
|
Compute agent 1's best value-maximizing response F, given K.
Parameters
----------
K : array_like(float, ndim=2)
A j x n array
method : str, optional(default='doubling')
Solution method used in solving the associated Riccati
equation, str in {'doubling', 'qz'}.
Returns
-------
F : array_like(float, ndim=2)
The policy function for a given K
P : array_like(float, ndim=2)
The value function for a given K
|
f5144:c0:m8
|
def compute_deterministic_entropy(self, F, K, x0):
|
H0 = dot(K.T, K)<EOL>C0 = np.zeros((self.n, <NUM_LIT:1>))<EOL>A0 = self.A - dot(self.B, F) + dot(self.C, K)<EOL>e = var_quadratic_sum(A0, C0, H0, self.beta, x0)<EOL>return e<EOL>
|
r"""
Given K and F, compute the value of deterministic entropy, which
is
.. math::
\sum_t \beta^t x_t' K'K x_t`
with
.. math::
x_{t+1} = (A - BF + CK) x_t
Parameters
----------
F : array_like(float, ndim=2)
The policy function, a k x n array
K : array_like(float, ndim=2)
The worst case matrix, a j x n array
x0 : array_like(float, ndim=1)
The initial condition for state
Returns
-------
e : scalar(int)
The deterministic entropy
|
f5144:c0:m9
|
def evaluate_F(self, F):
|
<EOL>Q, R, A, B, C = self.Q, self.R, self.A, self.B, self.C<EOL>beta, theta = self.beta, self.theta<EOL>K_F, P_F = self.F_to_K(F)<EOL>I = np.identity(self.j)<EOL>H = inv(I - C.T.dot(P_F.dot(C)) / theta)<EOL>d_F = log(det(H))<EOL>sig = -<NUM_LIT:1.0> / theta<EOL>AO = sqrt(beta) * (A - dot(B, F) + dot(C, K_F))<EOL>O_F = solve_discrete_lyapunov(AO.T, beta * dot(K_F.T, K_F))<EOL>ho = (trace(H - <NUM_LIT:1>) - d_F) / <NUM_LIT><EOL>tr = trace(dot(O_F, C.dot(H.dot(C.T))))<EOL>o_F = (ho + beta * tr) / (<NUM_LIT:1> - beta)<EOL>return K_F, P_F, d_F, O_F, o_F<EOL>
|
Given a fixed policy F, with the interpretation :math:`u = -F x`, this
function computes the matrix :math:`P_F` and constant :math:`d_F`
associated with discounted cost :math:`J_F(x) = x' P_F x + d_F`
Parameters
----------
F : array_like(float, ndim=2)
The policy function, a k x n array
Returns
-------
P_F : array_like(float, ndim=2)
Matrix for discounted cost
d_F : scalar(float)
Constant for discounted cost
K_F : array_like(float, ndim=2)
Worst case policy
O_F : array_like(float, ndim=2)
Matrix for discounted entropy
o_F : scalar(float)
Constant for discounted entropy
|
f5144:c0:m10
|
@njit<EOL>def lorenz_curve(y):
|
n = len(y)<EOL>y = np.sort(y)<EOL>s = np.zeros(n + <NUM_LIT:1>)<EOL>s[<NUM_LIT:1>:] = np.cumsum(y)<EOL>cum_people = np.zeros(n + <NUM_LIT:1>)<EOL>cum_income = np.zeros(n + <NUM_LIT:1>)<EOL>for i in range(<NUM_LIT:1>, n + <NUM_LIT:1>):<EOL><INDENT>cum_people[i] = i / n<EOL>cum_income[i] = s[i] / s[n]<EOL><DEDENT>return cum_people, cum_income<EOL>
|
Calculates the Lorenz Curve, a graphical representation of the distribution of income
or wealth.
It returns the cumulative share of people (x-axis) and the cumulative share of income earned
Parameters
----------
y : array_like(float or int, ndim=1)
Array of income/wealth for each individual. Unordered or ordered is fine.
Returns
-------
cum_people : array_like(float, ndim=1)
Cumulative share of people for each person index (i/n)
cum_income : array_like(float, ndim=1)
Cumulative share of income for each person index
References
----------
.. [1] https://en.wikipedia.org/wiki/Lorenz_curve
Examples
--------
>>> a_val, n = 3, 10_000
>>> y = np.random.pareto(a_val, size=n)
>>> f_vals, l_vals = lorenz(y)
|
f5145:m0
|
@njit(parallel=True)<EOL>def gini_coefficient(y):
|
n = len(y)<EOL>i_sum = np.zeros(n)<EOL>for i in prange(n):<EOL><INDENT>for j in range(n):<EOL><INDENT>i_sum[i] += abs(y[i] - y[j])<EOL><DEDENT><DEDENT>return np.sum(i_sum) / (<NUM_LIT:2> * n * np.sum(y))<EOL>
|
r"""
Implements the Gini inequality index
Parameters
-----------
y : array_like(float)
Array of income/wealth for each individual. Ordered or unordered is fine
Returns
-------
Gini index: float
The gini index describing the inequality of the array of income/wealth
References
----------
https://en.wikipedia.org/wiki/Gini_coefficient
|
f5145:m1
|
def shorrocks_index(A):
|
A = np.asarray(A) <EOL>m, n = A.shape<EOL>if m != n:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>diag_sum = np.diag(A).sum()<EOL>return (m - diag_sum) / (m - <NUM_LIT:1>)<EOL>
|
r"""
Implements Shorrocks mobility index
Parameters
-----------
A : array_like(float)
Square matrix with transition probabilities (mobility matrix) of
dimension m
Returns
--------
Shorrocks index: float
The Shorrocks mobility index calculated as
.. math::
s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1)
An index equal to 0 indicates complete immobility.
References
-----------
.. [1] Wealth distribution and social mobility in the US: A quantitative approach
(Benhabib, Bisin, Luo, 2017).
https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
|
f5145:m2
|
@property<EOL><INDENT>def mean(self):<DEDENT>
|
n, a, b = self.n, self.a, self.b<EOL>return n * a / (a + b)<EOL>
|
mean
|
f5146:c0:m1
|
@property<EOL><INDENT>def std(self):<DEDENT>
|
return sqrt(self.var)<EOL>
|
standard deviation
|
f5146:c0:m2
|
@property<EOL><INDENT>def var(self):<DEDENT>
|
n, a, b = self.n, self.a, self.b<EOL>top = n*a*b * (a + b + n)<EOL>btm = (a+b)**<NUM_LIT> * (a+b+<NUM_LIT:1.0>)<EOL>return top / btm<EOL>
|
Variance
|
f5146:c0:m3
|
@property<EOL><INDENT>def skew(self):<DEDENT>
|
n, a, b = self.n, self.a, self.b<EOL>t1 = (a+b+<NUM_LIT:2>*n) * (b - a) / (a+b+<NUM_LIT:2>)<EOL>t2 = sqrt((<NUM_LIT:1>+a+b) / (n*a*b * (n+a+b)))<EOL>return t1 * t2<EOL>
|
skewness
|
f5146:c0:m4
|
def pdf(self):
|
n, a, b = self.n, self.a, self.b<EOL>k = np.arange(n + <NUM_LIT:1>)<EOL>probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)<EOL>return probs<EOL>
|
r"""
Generate the vector of probabilities for the Beta-binomial
(n, a, b) distribution.
The Beta-binomial distribution takes the form
.. math::
p(k \,|\, n, a, b) =
{n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)},
\qquad k = 0, \ldots, n,
where :math:`B` is the beta function.
Parameters
----------
n : scalar(int)
First parameter to the Beta-binomial distribution
a : scalar(float)
Second parameter to the Beta-binomial distribution
b : scalar(float)
Third parameter to the Beta-binomial distribution
Returns
-------
probs: array_like(float)
Vector of probabilities over k
|
f5146:c0:m5
|
def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2,<EOL>beta=<NUM_LIT:1.0>, tol=<NUM_LIT>, max_iter=<NUM_LIT:1000>, random_state=None):
|
<EOL>params = A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2<EOL>params = map(np.asarray, params)<EOL>A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 = params<EOL>A, B1, B2 = [np.sqrt(beta) * x for x in (A, B1, B2)]<EOL>n = A.shape[<NUM_LIT:0>]<EOL>if B1.ndim == <NUM_LIT:1>:<EOL><INDENT>k_1 = <NUM_LIT:1><EOL>B1 = np.reshape(B1, (n, <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>k_1 = B1.shape[<NUM_LIT:1>]<EOL><DEDENT>if B2.ndim == <NUM_LIT:1>:<EOL><INDENT>k_2 = <NUM_LIT:1><EOL>B2 = np.reshape(B2, (n, <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>k_2 = B2.shape[<NUM_LIT:1>]<EOL><DEDENT>random_state = check_random_state(random_state)<EOL>v1 = eye(k_1)<EOL>v2 = eye(k_2)<EOL>P1 = np.zeros((n, n))<EOL>P2 = np.zeros((n, n))<EOL>F1 = random_state.randn(k_1, n)<EOL>F2 = random_state.randn(k_2, n)<EOL>for it in range(max_iter):<EOL><INDENT>F10 = F1<EOL>F20 = F2<EOL>G2 = solve(dot(B2.T, P2.dot(B2))+Q2, v2)<EOL>G1 = solve(dot(B1.T, P1.dot(B1))+Q1, v1)<EOL>H2 = dot(G2, B2.T.dot(P2))<EOL>H1 = dot(G1, B1.T.dot(P1))<EOL>F1_left = v1 - dot(H1.dot(B2)+G1.dot(M1.T),<EOL>H2.dot(B1)+G2.dot(M2.T))<EOL>F1_right = H1.dot(A)+G1.dot(W1.T) - dot(H1.dot(B2)+G1.dot(M1.T),<EOL>H2.dot(A)+G2.dot(W2.T))<EOL>F1 = solve(F1_left, F1_right)<EOL>F2 = H2.dot(A)+G2.dot(W2.T) - dot(H2.dot(B1)+G2.dot(M2.T), F1)<EOL>Lambda1 = A - B2.dot(F2)<EOL>Lambda2 = A - B1.dot(F1)<EOL>Pi1 = R1 + dot(F2.T, S1.dot(F2))<EOL>Pi2 = R2 + dot(F1.T, S2.dot(F1))<EOL>P1 = dot(Lambda1.T, P1.dot(Lambda1)) + Pi1 -dot(dot(Lambda1.T, P1.dot(B1)) + W1 - F2.T.dot(M1), F1)<EOL>P2 = dot(Lambda2.T, P2.dot(Lambda2)) + Pi2 -dot(dot(Lambda2.T, P2.dot(B2)) + W2 - F1.T.dot(M2), F2)<EOL>dd = np.max(np.abs(F10 - F1)) + np.max(np.abs(F20 - F2))<EOL>if dd < tol: <EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise ValueError(msg.format(max_iter))<EOL><DEDENT>return F1, F2, P1, P2<EOL>
|
r"""
Compute the limit of a Nash linear quadratic dynamic game. In this
problem, player i minimizes
.. math::
\sum_{t=0}^{\infty}
\left\{
x_t' r_i x_t + 2 x_t' w_i
u_{it} +u_{it}' q_i u_{it} + u_{jt}' s_i u_{jt} + 2 u_{jt}'
m_i u_{it}
\right\}
subject to the law of motion
.. math::
x_{t+1} = A x_t + b_1 u_{1t} + b_2 u_{2t}
and a perceived control law :math:`u_j(t) = - f_j x_t` for the other
player.
The solution computed in this routine is the :math:`f_i` and
:math:`p_i` of the associated double optimal linear regulator
problem.
Parameters
----------
A : scalar(float) or array_like(float)
Corresponds to the above equation, should be of size (n, n)
B1 : scalar(float) or array_like(float)
As above, size (n, k_1)
B2 : scalar(float) or array_like(float)
As above, size (n, k_2)
R1 : scalar(float) or array_like(float)
As above, size (n, n)
R2 : scalar(float) or array_like(float)
As above, size (n, n)
Q1 : scalar(float) or array_like(float)
As above, size (k_1, k_1)
Q2 : scalar(float) or array_like(float)
As above, size (k_2, k_2)
S1 : scalar(float) or array_like(float)
As above, size (k_1, k_1)
S2 : scalar(float) or array_like(float)
As above, size (k_2, k_2)
W1 : scalar(float) or array_like(float)
As above, size (n, k_1)
W2 : scalar(float) or array_like(float)
As above, size (n, k_2)
M1 : scalar(float) or array_like(float)
As above, size (k_2, k_1)
M2 : scalar(float) or array_like(float)
As above, size (k_1, k_2)
beta : scalar(float), optional(default=1.0)
Discount rate
tol : scalar(float), optional(default=1e-8)
This is the tolerance level for convergence
max_iter : scalar(int), optional(default=1000)
This is the maximum number of iteratiosn allowed
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
F1 : array_like, dtype=float, shape=(k_1, n)
Feedback law for agent 1
F2 : array_like, dtype=float, shape=(k_2, n)
Feedback law for agent 2
P1 : array_like, dtype=float, shape=(n, n)
The steady-state solution to the associated discrete matrix
Riccati equation for agent 1
P2 : array_like, dtype=float, shape=(n, n)
The steady-state solution to the associated discrete matrix
Riccati equation for agent 2
|
f5147:m0
|
def write_version_py(filename=None):
|
doc = "<STR_LIT>"<EOL>doc += "<STR_LIT>" % VERSION<EOL>if not filename:<EOL><INDENT>filename = os.path.join(os.path.dirname(__file__), '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>fl = open(filename, '<STR_LIT:w>')<EOL>try:<EOL><INDENT>fl.write(doc)<EOL><DEDENT>finally:<EOL><INDENT>fl.close()<EOL><DEDENT>
|
This constructs a version file for the project
|
f5148:m0
|
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
|
block = []<EOL>lines = part.split('<STR_LIT:\n>')<EOL>N = len(lines)<EOL>i = <NUM_LIT:0><EOL>decorator = None<EOL>while <NUM_LIT:1>:<EOL><INDENT>if i==N:<EOL><INDENT>break<EOL><DEDENT>line = lines[i]<EOL>i += <NUM_LIT:1><EOL>line_stripped = line.strip()<EOL>if line_stripped.startswith('<STR_LIT:#>'):<EOL><INDENT>block.append((COMMENT, line))<EOL>continue<EOL><DEDENT>if line_stripped.startswith('<STR_LIT:@>'):<EOL><INDENT>decorator = line_stripped<EOL>continue<EOL><DEDENT>matchin = rgxin.match(line)<EOL>if matchin:<EOL><INDENT>lineno, inputline = int(matchin.group(<NUM_LIT:1>)), matchin.group(<NUM_LIT:2>)<EOL>continuation = '<STR_LIT>'% '<STR_LIT>'.join(['<STR_LIT:.>']*(len(str(lineno))+<NUM_LIT:2>))<EOL>Nc = len(continuation)<EOL>rest = []<EOL>while i<N:<EOL><INDENT>nextline = lines[i]<EOL>matchout = rgxout.match(nextline)<EOL>if matchout or nextline.startswith('<STR_LIT:#>'):<EOL><INDENT>break<EOL><DEDENT>elif nextline.startswith(continuation):<EOL><INDENT>inputline += '<STR_LIT:\n>' + nextline[Nc:]<EOL><DEDENT>else:<EOL><INDENT>rest.append(nextline)<EOL><DEDENT>i+= <NUM_LIT:1><EOL><DEDENT>block.append((INPUT, (decorator, inputline, '<STR_LIT:\n>'.join(rest))))<EOL>continue<EOL><DEDENT>matchout = rgxout.match(line)<EOL>if matchout:<EOL><INDENT>lineno, output = int(matchout.group(<NUM_LIT:1>)), matchout.group(<NUM_LIT:2>)<EOL>if i<N-<NUM_LIT:1>:<EOL><INDENT>output = '<STR_LIT:\n>'.join([output] + lines[i:])<EOL><DEDENT>block.append((OUTPUT, output))<EOL>break<EOL><DEDENT><DEDENT>return block<EOL>
|
part is a string of ipython text, comprised of at most one
input, one ouput, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
|
f5151:m0
|
def process_input_line(self, line, store_history=True):
|
<EOL>stdout = sys.stdout<EOL>splitter = self.IP.input_splitter<EOL>try:<EOL><INDENT>sys.stdout = self.cout<EOL>splitter.push(line)<EOL>more = splitter.push_accepts_more()<EOL>if not more:<EOL><INDENT>source_raw = splitter.source_raw_reset()[<NUM_LIT:1>]<EOL>self.IP.run_cell(source_raw, store_history=store_history)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>sys.stdout = stdout<EOL><DEDENT>
|
process the input, capturing stdout
|
f5151:c0:m2
|
def process_image(self, decorator):
|
savefig_dir = self.savefig_dir<EOL>source_dir = self.source_dir<EOL>saveargs = decorator.split('<STR_LIT:U+0020>')<EOL>filename = saveargs[<NUM_LIT:1>]<EOL>outfile = os.path.relpath(os.path.join(savefig_dir,filename),<EOL>source_dir)<EOL>imagerows = ['<STR_LIT>'%outfile]<EOL>for kwarg in saveargs[<NUM_LIT:2>:]:<EOL><INDENT>arg, val = kwarg.split('<STR_LIT:=>')<EOL>arg = arg.strip()<EOL>val = val.strip()<EOL>imagerows.append('<STR_LIT>'%(arg, val))<EOL><DEDENT>image_file = os.path.basename(outfile) <EOL>image_directive = '<STR_LIT:\n>'.join(imagerows)<EOL>return image_file, image_directive<EOL>
|
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
|
f5151:c0:m3
|
def process_input(self, data, input_prompt, lineno):
|
decorator, input, rest = data<EOL>image_file = None<EOL>image_directive = None<EOL>is_verbatim = decorator=='<STR_LIT>' or self.is_verbatim<EOL>is_doctest = decorator=='<STR_LIT>' or self.is_doctest<EOL>is_suppress = decorator=='<STR_LIT>' or self.is_suppress<EOL>is_okexcept = decorator=='<STR_LIT>' or self.is_okexcept<EOL>is_savefig = decorator is not None anddecorator.startswith('<STR_LIT>')<EOL>input_lines = input.split('<STR_LIT:\n>')<EOL>self.datacontent = data<EOL>continuation = '<STR_LIT>'%'<STR_LIT>'.join(['<STR_LIT:.>']*(len(str(lineno))+<NUM_LIT:2>))<EOL>if is_savefig:<EOL><INDENT>image_file, image_directive = self.process_image(decorator)<EOL><DEDENT>ret = []<EOL>is_semicolon = False<EOL>store_history = True<EOL>for i, line in enumerate(input_lines):<EOL><INDENT>if line.endswith('<STR_LIT:;>'):<EOL><INDENT>is_semicolon = True<EOL><DEDENT>if is_semicolon or is_suppress:<EOL><INDENT>store_history = False<EOL><DEDENT>if i==<NUM_LIT:0>:<EOL><INDENT>if is_verbatim:<EOL><INDENT>self.process_input_line('<STR_LIT>')<EOL>self.IP.execution_count += <NUM_LIT:1> <EOL><DEDENT>else:<EOL><INDENT>self.process_input_line(line, store_history=store_history)<EOL><DEDENT>formatted_line = '<STR_LIT>'%(input_prompt, line)<EOL><DEDENT>else:<EOL><INDENT>if not is_verbatim:<EOL><INDENT>self.process_input_line(line, store_history=store_history)<EOL><DEDENT>formatted_line = '<STR_LIT>'%(continuation, line)<EOL><DEDENT>if not is_suppress:<EOL><INDENT>ret.append(formatted_line)<EOL><DEDENT><DEDENT>if not is_suppress:<EOL><INDENT>if len(rest.strip()):<EOL><INDENT>if is_verbatim:<EOL><INDENT>ret.append(rest)<EOL><DEDENT><DEDENT><DEDENT>self.cout.seek(<NUM_LIT:0>)<EOL>output = self.cout.read()<EOL>if not is_suppress and not is_semicolon:<EOL><INDENT>ret.append(output.decode('<STR_LIT:utf-8>'))<EOL><DEDENT>if not is_okexcept and "<STR_LIT>" in output:<EOL><INDENT>sys.stdout.write(output)<EOL><DEDENT>self.cout.truncate(<NUM_LIT:0>)<EOL>return (ret, input_lines, output, is_doctest, 
image_file,<EOL>image_directive)<EOL>
|
Process data block for INPUT token.
|
f5151:c0:m4
|
def process_comment(self, data):
|
if not self.is_suppress:<EOL><INDENT>return [data]<EOL><DEDENT>
|
Process data fPblock for COMMENT token.
|
f5151:c0:m6
|
def save_image(self, image_file):
|
self.ensure_pyplot()<EOL>command = ('<STR_LIT>'<EOL>'<STR_LIT>' % image_file)<EOL>self.process_input_line('<STR_LIT>', store_history=False)<EOL>self.process_input_line('<STR_LIT>', store_history=False)<EOL>self.process_input_line(command, store_history=False)<EOL>self.process_input_line('<STR_LIT>', store_history=False)<EOL>self.process_input_line('<STR_LIT>', store_history=False)<EOL>self.clear_cout()<EOL>
|
Saves the image file to disk.
|
f5151:c0:m7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.