repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/__init__.py | """Public-facing objects."""
from . import estimation, utilities, grids, interpolate, misc, hetblocks
from .blocks.simple_block import simple
from .blocks.het_block import het
from .blocks.solved_block import solved
from .blocks.combined_block import combine, create_model
from .blocks.support.simple_displacement import apply_function
from .classes.steady_state_dict import SteadyStateDict
from .classes.impulse_dict import ImpulseDict
from .classes.jacobian_dict import JacobianDict
from .utilities.drawdag import drawdag
# Ensure warning uniformity across package
import warnings
# Force warnings.warn() to omit the source code line in the message
# Keep a handle to the stock formatter so the replacement can delegate to it.
formatwarning_orig = warnings.formatwarning
# Monkey-patch the formatter so every warning omits the offending source line
# (passing line='' suppresses it), giving uniform warning output package-wide.
warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
    formatwarning_orig(message, category, filename, lineno, line='')
# deprecation of old ways for calling things
def agrid(*args, **kwargs):
    """Deprecated top-level alias: warn, then forward to utilities.discretize.agrid."""
    warnings.warn("The function 'agrid' is deprecated and will be removed in a subsequent version.\n"
                  "Please call sj.grids.asset_grid(amin, amax, n) instead.")
    return utilities.discretize.agrid(*args, **kwargs)
def markov_rouwenhorst(*args, **kwargs):
    """Deprecated top-level alias: warn, then forward to grids.markov_rouwenhorst."""
    warnings.warn("Calling sj.markov_rouwenhorst() is deprecated and will be disallowed in a subsequent version.\n"
                  "Please call sj.grids.markov_rouwenhorst() instead.")
    return grids.markov_rouwenhorst(*args, **kwargs)
def markov_tauchen(*args, **kwargs):
    """Deprecated top-level alias: warn, then forward to grids.markov_tauchen."""
    warnings.warn("Calling sj.markov_tauchen() is deprecated and will be disallowed in a subsequent version.\n"
                  "Please call sj.grids.markov_tauchen() instead.")
    return grids.markov_tauchen(*args, **kwargs)
def interpolate_y(*args, **kwargs):
    """Deprecated top-level alias: warn, then forward to interpolate.interpolate_y."""
    warnings.warn("Calling sj.interpolate_y() is deprecated and will be disallowed in a subsequent version.\n"
                  "Please call sj.interpolate.interpolate_y() instead.")
    return interpolate.interpolate_y(*args, **kwargs)
def setmin(*args, **kwargs):
    """Deprecated top-level alias: warn, then forward to misc.setmin.

    Note: misc.setmin operates in place; nothing is returned (matching the original).
    """
    warnings.warn("Calling sj.setmin() is deprecated and will be disallowed in a subsequent version.\n"
                  "Please call sj.misc.setmin() instead.")
    misc.setmin(*args, **kwargs)
| 2,209 | 45.041667 | 115 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/interpolate.py | from .utilities.interpolate import *
| 37 | 18 | 36 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/classes/steady_state_dict.py | from copy import deepcopy
from .result_dict import ResultDict
from ..utilities.misc import dict_diff
from ..utilities.ordered_set import OrderedSet
from ..utilities.bijection import Bijection
import numpy as np
from numbers import Real
from typing import Any, Dict, Union
Array = Any
class SteadyStateDict(ResultDict):
    """Steady-state results: top-level aggregates plus per-block internals."""

    def difference(self, data_to_remove):
        """Return a copy with the given top-level entries removed; internals are deep-copied."""
        remaining = dict_diff(self.toplevel, data_to_remove)
        return SteadyStateDict(remaining, deepcopy(self.internals))

    def _vector_valued(self):
        """Names of top-level entries holding more than a single value."""
        vector_keys = [k for k, v in self.toplevel.items() if np.size(v) > 1]
        return OrderedSet(vector_keys)
UserProvidedSS = Dict[str, Union[Real, Array]]
| 626 | 27.5 | 98 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/classes/jacobian_dict.py | import copy
import warnings
import numpy as np
from ..utilities.misc import factor, factored_solve
from ..utilities.ordered_set import OrderedSet
from ..utilities.bijection import Bijection
from .impulse_dict import ImpulseDict
from .sparse_jacobians import IdentityMatrix, SimpleSparse, make_matrix
from typing import Any, Dict, Union
Array = Any
Jacobian = Union[np.ndarray, IdentityMatrix, SimpleSparse]
class NestedDict:
    """Two-level mapping output name -> {input name -> value}, with OrderedSet
    bookkeeping of output and input names. Base class for JacobianDict.

    If either outputs or inputs ends up empty, the object is normalized to a
    fully empty NestedDict (both sets empty).
    """

    def __init__(self, nesteddict, outputs: OrderedSet=None, inputs: OrderedSet=None, name: str=None):
        if isinstance(nesteddict, NestedDict):
            # copy-construct: share the underlying dict and copy metadata
            self.nesteddict = nesteddict.nesteddict
            self.outputs: OrderedSet = nesteddict.outputs
            self.inputs: OrderedSet = nesteddict.inputs
            self.name: str = nesteddict.name
        else:
            self.nesteddict = nesteddict
            if outputs is None:
                # infer outputs from top-level keys
                outputs = OrderedSet(nesteddict.keys())
            if inputs is None:
                # infer inputs as the union of all second-level keys
                inputs = OrderedSet([])
                for v in nesteddict.values():
                    inputs |= v
            if not outputs or not inputs:
                # degenerate case: normalize to a fully empty NestedDict
                outputs = OrderedSet([])
                inputs = OrderedSet([])
            self.outputs = OrderedSet(outputs)
            self.inputs = OrderedSet(inputs)
            if name is None:
                # TODO: Figure out better default naming scheme for NestedDicts
                self.name = "NestedDict"
            else:
                self.name = name

    def __repr__(self):
        return f'<{type(self).__name__} outputs={self.outputs}, inputs={self.inputs}>'

    def __iter__(self):
        # iterating a NestedDict yields its output names
        return iter(self.outputs)

    def __or__(self, other):
        # non-in-place merge: make a copy, then update
        merged = type(self)(self.nesteddict, self.outputs, self.inputs)
        merged.update(other)
        return merged

    def __getitem__(self, x):
        """Flexible indexing: by output name, by (output, input) pair (either of which
        may be a slice ':' meaning 'all'), or by a collection of output names."""
        if isinstance(x, str):
            # case 1: just a single output, give subdict
            return self.nesteddict[x]
        elif isinstance(x, tuple):
            # case 2: tuple, referring to output and input
            o, i = x
            o = self.outputs if o == slice(None, None, None) else o
            i = self.inputs if i == slice(None, None, None) else i
            if isinstance(o, str):
                if isinstance(i, str):
                    # case 2a: one output, one input, return single Jacobian
                    return self.nesteddict[o][i]
                else:
                    # case 2b: one output, multiple inputs, return dict
                    return subdict(self.nesteddict[o], i)
            else:
                # case 2c: multiple outputs, one or more inputs, return NestedDict with outputs o and inputs i
                i = (i,) if isinstance(i, str) else i
                return type(self)({oo: subdict(self.nesteddict[oo], i) for oo in o}, o, i)
        elif isinstance(x, OrderedSet) or isinstance(x, list) or isinstance(x, set):
            # case 3: assume that list or set refers just to outputs, get all of those
            return type(self)({oo: self.nesteddict[oo] for oo in x}, x, self.inputs)
        else:
            raise ValueError(f'Tried to get impermissible item {x}')

    def get(self, *args, **kwargs):
        # this is for compatibility, not a huge fan
        return self.nesteddict.get(*args, **kwargs)

    def update(self, J):
        """In-place merge of another NestedDict: inputs must match exactly and
        outputs must be disjoint; empty J is a no-op."""
        if not J.outputs or not J.inputs:
            return
        if set(self.inputs) != set(J.inputs):
            raise ValueError(f'Cannot merge {type(self).__name__}s with non-overlapping inputs {set(self.inputs) ^ set(J.inputs)}')
        if not set(self.outputs).isdisjoint(J.outputs):
            raise ValueError(f'Cannot merge {type(self).__name__}s with overlapping outputs {set(self.outputs) & set(J.outputs)}')
        self.outputs = self.outputs | J.outputs
        self.nesteddict = {**self.nesteddict, **J.nesteddict}

    # Ensure that every output in self has either a Jacobian or filler value for each input,
    # s.t. all inputs map to all outputs
    def complete(self, filler):
        nesteddict = {}
        for o in self.outputs:
            nesteddict[o] = dict(self.nesteddict[o])
            for i in self.inputs:
                if i not in nesteddict[o]:
                    nesteddict[o][i] = filler
        return type(self)(nesteddict, self.outputs, self.inputs)
def deduplicate(mylist):
    """Remove duplicates while otherwise maintaining order."""
    seen = set()
    unique = []
    for item in mylist:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def subdict(d, ks):
    """Return subdict of d with only keys in ks (keys absent from d are skipped)."""
    out = {}
    for key in ks:
        if key in d:
            out[key] = d[key]
    return out
class JacobianDict(NestedDict):
    """Nested dict of Jacobians, [output][input] -> Jacobian, mapping input paths
    to output paths, with an optional common truncation horizon T."""

    def __init__(self, nesteddict, outputs=None, inputs=None, name=None, T=None, check=False):
        if check:
            ensure_valid_jacobiandict(nesteddict)
        super().__init__(nesteddict, outputs=outputs, inputs=inputs, name=name)
        self.T = T

    @staticmethod
    def identity(ks):
        """JacobianDict mapping each name in ks to itself via the identity matrix."""
        return JacobianDict({k: {k: IdentityMatrix()} for k in ks}, ks, ks)

    def addinputs(self):
        """Add any inputs that were not already in output list as outputs, with the identity"""
        inputs = [x for x in self.inputs if x not in self.outputs]
        return self | JacobianDict.identity(inputs)

    def __matmul__(self, x):
        # J @ J' composes; J @ Bijection remaps names; J @ impulse applies the Jacobian
        if isinstance(x, JacobianDict):
            return self.compose(x)
        elif isinstance(x, Bijection):
            return self.remap(x)
        else:
            return self.apply(x)

    def __rmatmul__(self, x):
        # NOTE(review): falls through (returns None) when x is not a Bijection;
        # left unchanged to preserve existing behavior for callers
        if isinstance(x, Bijection):
            return self.remap(x)

    def remap(self, x: Bijection):
        """Rename outputs, inputs, and nested keys through the bijection x."""
        if not x:
            return self
        nesteddict = x @ self.nesteddict
        for o in nesteddict.keys():
            nesteddict[o] = x @ nesteddict[o]
        return JacobianDict(nesteddict, inputs=x @ self.inputs, outputs=x @ self.outputs)

    def __bool__(self):
        return bool(self.outputs) and bool(self.inputs)

    def compose(self, J):
        """Returns self @ J (chain rule: sum over intermediate variables)."""
        if self.T is not None and J.T is not None and self.T != J.T:
            raise ValueError(f'Trying to multiply JacobianDicts with inconsistent dimensions {self.T} and {J.T}')
        o_list = self.outputs
        # intermediates: our inputs that J produces as outputs
        m_list = tuple(set(self.inputs) & set(J.outputs))
        i_list = J.inputs
        J_om = self.nesteddict
        J_mi = J.nesteddict
        J_oi = {}
        for o in o_list:
            J_oi[o] = {}
            for i in i_list:
                Jout = None
                for m in m_list:
                    if m in J_om[o] and i in J_mi[m]:
                        if Jout is None:
                            Jout = J_om[o][m] @ J_mi[m][i]
                        else:
                            Jout += J_om[o][m] @ J_mi[m][i]
                if Jout is not None:
                    J_oi[o][i] = Jout
        return JacobianDict(J_oi, o_list, i_list)

    def apply(self, x: Union[ImpulseDict, Dict[str, Array]]):
        """Returns J @ x, applying each Jacobian to the corresponding impulse path."""
        x = ImpulseDict(x)
        inputs = x.keys() & set(self.inputs)
        J_oi = self.nesteddict
        y = {}
        for o in self.outputs:
            y[o] = np.zeros(x.T)
            J_i = J_oi[o]
            for i in inputs:
                if i in J_i:
                    y[o] += J_i[i] @ x[i]
        return x | ImpulseDict(y, T=x.T)

    def pack(self, T=None):
        """Stack all [output][input] Jacobians into one (nO*T, nI*T) dense matrix,
        in the order given by self.outputs and self.inputs; missing entries are zero."""
        if T is None:
            if self.T is not None:
                T = self.T
            else:
                # bug fix: these two messages were missing their f-string prefix,
                # so '{self}'/'{T}' were printed literally
                raise ValueError(f'Trying to pack {self} into matrix, but do not know T')
        else:
            if self.T is not None and T != self.T:
                raise ValueError(f'{self} has dimension {self.T}, but trying to pack it with alternate dimension {T}')
        J = np.empty((len(self.outputs) * T, len(self.inputs) * T))
        for iO, O in enumerate(self.outputs):
            for iI, I in enumerate(self.inputs):
                J_OI = self[O].get(I)
                if J_OI is not None:
                    J[(T * iO):(T * (iO + 1)), (T * iI):(T * (iI + 1))] = make_matrix(J_OI, T)
                else:
                    J[(T * iO):(T * (iO + 1)), (T * iI):(T * (iI + 1))] = 0
        return J

    @staticmethod
    def unpack(bigjac, outputs, inputs, T):
        """If we have an (nO*T)*(nI*T) jacobian and provide names of nO outputs and nI inputs, output nested dictionary"""
        jacdict = {}
        for iO, O in enumerate(outputs):
            jacdict[O] = {}
            for iI, I in enumerate(inputs):
                jacdict[O][I] = bigjac[(T * iO):(T * (iO + 1)), (T * iI):(T * (iI + 1))]
        return JacobianDict(jacdict, outputs, inputs, T=T)

    def factored(self, T=None):
        """Pre-factor the packed matrix for repeated linear solves."""
        return FactoredJacobianDict(self, T)
class FactoredJacobianDict:
    """Pre-factored packed Jacobian H_U of targets with respect to unknowns,
    supporting repeated solves of -H_U^{-1} @ (...) without refactorization."""

    def __init__(self, jacobian_dict: JacobianDict, T=None):
        # resolve the truncation horizon: prefer the JacobianDict's own T
        if jacobian_dict.T is None:
            if T is None:
                raise ValueError(f'Trying to factor (solve) {jacobian_dict} but do not know T')
            self.T = T
        else:
            self.T = jacobian_dict.T
        H_U = jacobian_dict.pack(T)
        self.targets = jacobian_dict.outputs
        self.unknowns = jacobian_dict.inputs
        # factoring requires a square system: one target per unknown
        if len(self.targets) != len(self.unknowns):
            raise ValueError('Trying to factor JacobianDict unequal number of inputs (unknowns)'
                             f' {self.unknowns} and outputs (targets) {self.targets}')
        self.H_U_factored = factor(H_U)

    def __repr__(self):
        return f'<{type(self).__name__} unknowns={self.unknowns}, targets={self.targets}>'

    # TODO: test this
    def to_jacobian_dict(self):
        """Return -H_U^{-1} as an explicit JacobianDict (unknowns as outputs)."""
        return JacobianDict.unpack(-factored_solve(self.H_U_factored, np.eye(self.T*len(self.unknowns))),
                                   self.unknowns, self.targets, self.T)

    def __matmul__(self, x):
        # same dispatch convention as JacobianDict.__matmul__
        if isinstance(x, JacobianDict):
            return self.compose(x)
        elif isinstance(x, Bijection):
            return self.remap(x)
        else:
            return self.apply(x)

    def __rmatmul__(self, x):
        # NOTE(review): falls through (returns None) for non-Bijection x
        if isinstance(x, Bijection):
            return self.remap(x)

    def remap(self, x: Bijection):
        """Rename unknowns and targets through bijection x (shallow copy; factorization shared)."""
        if not x:
            return self
        newself = copy.copy(self)
        newself.unknowns = x @ self.unknowns
        newself.targets = x @ self.targets
        return newself

    def compose(self, J: JacobianDict):
        """Returns = -H_U^{-1} @ J"""
        Jsub = J[[o for o in self.targets if o in J.outputs]].pack(self.T)
        out = -factored_solve(self.H_U_factored, Jsub)
        return JacobianDict.unpack(out, self.unknowns, J.inputs, self.T)

    def apply(self, x: Union[ImpulseDict, Dict[str, Array]]):
        """Returns -H_U^{-1} @ x"""
        xsub = ImpulseDict(x).get(self.targets).pack()
        out = -factored_solve(self.H_U_factored, xsub)
        return ImpulseDict.unpack(out, self.unknowns, self.T)
def ensure_valid_jacobiandict(d):
    """The valid structure of `d` is a Dict[str, Dict[str, Jacobian]], where calling `d[o][i]` yields a
    Jacobian of type Jacobian mapping sequences of `i` to sequences of `o`. The null type for `d` is assumed
    to be {}, which is permitted the empty version of a valid nested dict.

    Only the first output key and first input key are sampled for validation.
    NOTE(review): `isinstance(x, Jacobian)` with a typing.Union requires Python 3.10+ —
    confirm the package's supported interpreter versions.
    """
    if d and not isinstance(d, JacobianDict):
        # Assume it's sufficient to just check one of the keys
        if not isinstance(next(iter(d.keys())), str):
            raise ValueError(f"The dict argument {d} must have keys with type `str` to indicate `output` names.")
        jac_o_dict = next(iter(d.values()))
        if isinstance(jac_o_dict, dict):
            if jac_o_dict:
                if not isinstance(next(iter(jac_o_dict.keys())), str):
                    raise ValueError(f"The values of the dict argument {d} must be dicts with keys of type `str` to indicate"
                                     f" `input` names.")
                jac_o_i = next(iter(jac_o_dict.values()))
                if not isinstance(jac_o_i, Jacobian):
                    raise ValueError(f"The dict argument {d}'s values must be dicts with values of type `Jacobian`.")
                else:
                    # ndarray Jacobians must additionally be square
                    if isinstance(jac_o_i, np.ndarray) and np.shape(jac_o_i)[0] != np.shape(jac_o_i)[1]:
                        raise ValueError(f"The Jacobians in {d} must be square matrices of type `Jacobian`.")
        else:
            raise ValueError(f"The argument {d} must be of type `dict`, with keys of type `str` and"
                             f" values of type `Jacobian`.")
def verify_saved_jacobian(block_name, Js, outputs, inputs, T):
    """Verify that pre-computed Jacobian has all the right outputs, inputs, and length.

    Returns True only if Js[block_name] exists, is a JacobianDict, covers all
    requested outputs and inputs, and (when T is given) has matching horizon.
    Issues a warning (except for the simple absent-key case) before returning False.
    """
    if block_name not in Js.keys():
        # don't throw warning, this will happen often for simple blocks
        return False
    J = Js[block_name]
    if not isinstance(J, JacobianDict):
        warnings.warn(f'Js[{block_name}] is not a JacobianDict.')
        return False
    if not set(outputs).issubset(set(J.outputs)):
        missing = set(outputs).difference(set(J.outputs))
        warnings.warn(f'Js[{block_name}] misses required outputs {missing}.')
        return False
    if not set(inputs).issubset(set(J.inputs)):
        missing = set(inputs).difference(set(J.inputs))
        warnings.warn(f'Js[{block_name}] misses required inputs {missing}.')
        return False
    # Jacobian of simple blocks may have a sparse representation
    if T is not None:
        Tsaved = J[J.outputs[0]][J.inputs[0]].shape[-1]
        if T != Tsaved:
            # bug fix: message was missing the closing ']' after the block name
            warnings.warn(f'Js[{block_name}] has length {Tsaved}, but you asked for {T}')
            return False
    return True
| 13,815 | 38.25 | 125 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/classes/result_dict.py | import copy
from ..utilities.bijection import Bijection
class ResultDict:
    """Base container for model results: a `toplevel` dict of named aggregates
    plus an `internals` dict of per-block internal variables. Supports dict-like
    access, merging with |, and name remapping with @ (Bijection)."""

    def __init__(self, data, internals=None):
        if isinstance(data, ResultDict):
            # copy-construct; internals then come from data, not the argument
            if internals is not None:
                raise ValueError(f'Supplying {type(self).__name__} and also internals to constructor not allowed')
            self.toplevel = data.toplevel.copy()
            self.internals = data.internals.copy()
        else:
            self.toplevel: dict = data.copy()
            self.internals: dict = {} if internals is None else internals.copy()

    def __repr__(self):
        if self.internals:
            return f"<{type(self).__name__}: {list(self.toplevel.keys())}, internals={list(self.internals.keys())}>"
        else:
            return f"<{type(self).__name__}: {list(self.toplevel.keys())}>"

    def __iter__(self):
        return iter(self.toplevel)

    def __getitem__(self, k, **kwargs):
        """Index by a single name (returns the value) or an iterable of names
        (returns a new instance restricted to those names); tuples are rejected."""
        if isinstance(k, str):
            return self.toplevel[k]
        elif isinstance(k, tuple):
            raise TypeError(f'Key {k} to {type(self).__name__} cannot be tuple')
        else:
            try:
                return type(self)({ki: self.toplevel[ki] for ki in k}, **kwargs)
            except TypeError:
                raise TypeError(f'Key {k} to {type(self).__name__} needs to be a string or an iterable (list, set, etc) of strings')

    def __setitem__(self, k, v):
        self.toplevel[k] = v

    def __matmul__(self, x):
        # remap keys in toplevel
        if isinstance(x, Bijection):
            new = copy.deepcopy(self)
            new.toplevel = x @ self.toplevel
            return new
        else:
            return NotImplemented

    def __rmatmul__(self, x):
        return self.__matmul__(x)

    def __len__(self):
        return len(self.toplevel)

    def __or__(self, other):
        """Non-in-place merge; other must be the same ResultDict subclass."""
        if not isinstance(other, type(self)):
            raise ValueError(f'Trying to merge a {type(self).__name__} with a {type(other).__name__}.')
        merged = self.copy()
        merged.update(other)
        return merged

    def keys(self):
        return self.toplevel.keys()

    def values(self):
        return self.toplevel.values()

    def items(self):
        return self.toplevel.items()

    def update(self, rdict):
        """In-place merge: ResultDicts merge both levels; plain mappings merge toplevel only."""
        if isinstance(rdict, ResultDict):
            self.toplevel.update(rdict.toplevel)
            self.internals.update(rdict.internals)
        else:
            self.toplevel.update(dict(rdict))

    def copy(self):
        return type(self)(self)
| 2,557 | 31.379747 | 132 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/classes/sparse_jacobians.py | import numpy as np
from numba import njit
import copy
class IdentityMatrix:
    """Simple identity matrix class, cheaper than using actual np.eye(T) matrix,
    use to initialize Jacobian of a variable wrt itself"""
    # take precedence over NumPy operands in mixed binary operations
    __array_priority__ = 10_000

    def sparse(self):
        """Equivalent SimpleSparse representation, less efficient operations but more general."""
        return SimpleSparse({(0, 0): 1})

    def matrix(self, T):
        """Materialize as a dense T-by-T identity matrix."""
        return np.eye(T)

    def __matmul__(self, other):
        # identity times anything is that thing; deep-copy so callers may mutate safely
        return copy.deepcopy(other)

    def __rmatmul__(self, other):
        return copy.deepcopy(other)

    def __mul__(self, scalar):
        # scalar multiples lose the trivial-identity shortcut: fall back to sparse form
        return scalar * self.sparse()

    def __rmul__(self, scalar):
        return self.sparse() * scalar

    def __add__(self, other):
        return self.sparse() + other

    def __radd__(self, other):
        return other + self.sparse()

    def __sub__(self, other):
        return self.sparse() - other

    def __rsub__(self, other):
        return other - self.sparse()

    def __neg__(self):
        return -self.sparse()

    def __pos__(self):
        return self

    def __repr__(self):
        return 'IdentityMatrix'
class SimpleSparse:
    """Efficient representation of sparse linear operators, which are linear combinations of basis
    operators represented by pairs (i, m), where i is the index of diagonal on which there are 1s
    (measured by # above main diagonal) and m is number of initial entries missing.
    Examples of such basis operators:
        - (0, 0) is identity operator
        - (0, 2) is identity operator with first two '1's on main diagonal missing
        - (1, 0) has 1s on diagonal above main diagonal: "left-shift" operator
        - (-1, 1) has 1s on diagonal below main diagonal, except first column
    The linear combination of these basis operators that makes up a given SimpleSparse object is
    stored as a dict 'elements' mapping (i, m) -> x.
    The Jacobian of a SimpleBlock is a SimpleSparse operator combining basis elements (i, 0). We need
    the more general basis (i, m) to ensure closure under multiplication.
    These (i, m) correspond to the Q_(-i, m) operators defined for Proposition 2 of the Sequence Space
    Jacobian paper. The flipped sign in the code is so that the index 'i' matches the k(i) notation
    for writing SimpleBlock functions.
    The "dunder" methods x.__add__(y), x.__matmul__(y), x.__rsub__(y), etc. in Python implement infix
    operations x + y, x @ y, y - x, etc. Defining these allows us to use these more-or-less
    interchangeably with ordinary NumPy matrices.
    """
    # when performing binary operations on SimpleSparse and a NumPy array, use SimpleSparse's rules
    __array_priority__ = 1000

    def __init__(self, elements):
        # elements: dict mapping basis pair (i, m) -> coefficient x
        self.elements = elements
        # lazy cache for array() representation, filled on first call
        self.indices, self.xs = None, None

    @staticmethod
    def from_simple_diagonals(elements):
        """Take dict i -> x, i.e. from SimpleBlock differentiation, convert to SimpleSparse (i, 0) -> x"""
        return SimpleSparse({(i, 0): x for i, x in elements.items()})

    def matrix(self, T):
        """Return matrix giving first T rows and T columns of matrix representation of SimpleSparse"""
        return self + np.zeros((T, T))

    def array(self):
        """Rewrite dict (i, m) -> x as pair of NumPy arrays, one size-N*2 array of ints with rows (i, m)
        and one size-N array of floats with entries x.
        This is needed for Numba to take as input. Cache for efficiency.
        """
        if self.indices is not None:
            return self.indices, self.xs
        else:
            indices, xs = zip(*self.elements.items())
            self.indices, self.xs = np.array(indices), np.array(xs)
            return self.indices, self.xs

    @property
    def T(self):
        """Transpose"""
        return SimpleSparse({(-i, m): x for (i, m), x in self.elements.items()})

    @property
    def iszero(self):
        # True when no numerically significant coefficients remain
        return not self.nonzero().elements

    def nonzero(self):
        """Return a copy with numerically negligible coefficients dropped."""
        elements = self.elements.copy()
        for im, x in self.elements.items():
            # safeguard to retain sparsity: disregard extremely small elements (num error)
            if abs(elements[im]) < 1E-14:
                del elements[im]
        return SimpleSparse(elements)

    def __pos__(self):
        return self

    def __neg__(self):
        return SimpleSparse({im: -x for im, x in self.elements.items()})

    def __matmul__(self, A):
        if isinstance(A, SimpleSparse):
            # multiply SimpleSparse by SimpleSparse, simple analytical rules in multiply_rs_rs
            return SimpleSparse(multiply_rs_rs(self, A))
        elif isinstance(A, np.ndarray):
            # multiply SimpleSparse by matrix or vector, multiply_rs_matrix uses slicing
            indices, xs = self.array()
            if A.ndim == 2:
                return multiply_rs_matrix(indices, xs, A)
            elif A.ndim == 1:
                # promote vector to column, multiply, then strip the extra axis
                return multiply_rs_matrix(indices, xs, A[:, np.newaxis])[:, 0]
            else:
                return NotImplemented
        else:
            return NotImplemented

    def __rmatmul__(self, A):
        # multiplication rule when this object is on right (will only be called when left is matrix)
        # for simplicity, just use transpose to reduce this to previous cases
        return (self.T @ A.T).T

    def __add__(self, A):
        if isinstance(A, SimpleSparse):
            # add SimpleSparse to SimpleSparse, combining dicts, summing x when (i, m) overlap
            elements = self.elements.copy()
            for im, x in A.elements.items():
                if im in elements:
                    elements[im] += x
                    # safeguard to retain sparsity: disregard extremely small elements (num error)
                    if abs(elements[im]) < 1E-14:
                        del elements[im]
                else:
                    elements[im] = x
            return SimpleSparse(elements)
        else:
            # add SimpleSparse to T*T matrix
            if not isinstance(A, np.ndarray) or A.ndim != 2 or A.shape[0] != A.shape[1]:
                return NotImplemented
            T = A.shape[0]
            # fancy trick to do this efficiently by writing A as flat vector
            # then (i, m) can be mapped directly to NumPy slicing!
            A = A.flatten()  # use flatten, not ravel, since we'll modify A and want a copy
            for (i, m), x in self.elements.items():
                if i < 0:
                    A[T * (-i) + (T + 1) * m::T + 1] += x
                else:
                    A[i + (T + 1) * m:(T - i) * T:T + 1] += x
            return A.reshape((T, T))

    def __radd__(self, A):
        try:
            return self + A
        except:
            # debugging aid: show both operands before re-raising the original error
            print(self)
            print(A)
            raise

    def __sub__(self, A):
        # slightly inefficient implementation with temporary for simplicity
        return self + (-A)

    def __rsub__(self, A):
        return -self + A

    def __mul__(self, a):
        if not np.isscalar(a):
            return NotImplemented
        return SimpleSparse({im: a * x for im, x in self.elements.items()})

    def __rmul__(self, a):
        return self * a

    def __repr__(self):
        formatted = '{' + ', '.join(f'({i}, {m}): {x:.3f}' for (i, m), x in self.elements.items()) + '}'
        return f'SimpleSparse({formatted})'

    def __eq__(self, s):
        # NOTE(review): defining __eq__ without __hash__ makes instances unhashable
        return self.elements == s.elements
def multiply_basis(t1, t2):
    """Matrix multiplication operation mapping two sparse basis elements to another.

    Takes basis pairs t1=(i, m) and t2=(j, n) and returns the (k, l) pair of
    their product operator.
    """
    # equivalent to formula in Proposition 2 of Sequence Space Jacobian paper, but with
    # signs of i and j flipped to reflect different sign convention used here
    i, m = t1
    j, n = t2
    k = i + j
    if i >= 0:
        if j >= 0:
            l = max(m, n - i)
        elif k >= 0:
            l = max(m, n - k)
        else:
            l = max(m + k, n)
    else:
        if j <= 0:
            l = max(m + j, n)
        else:
            l = max(m, n) + min(-i, j)
    return k, l
def multiply_rs_rs(s1, s2):
    """Matrix multiplication operation on two SimpleSparse objects."""
    # iterate over all pairs (i, m) -> x and (j, n) -> y in objects,
    # add all pairwise products to get overall product
    elements = {}
    for im, x in s1.elements.items():
        for jn, y in s2.elements.items():
            kl = multiply_basis(im, jn)
            elements[kl] = elements.get(kl, 0) + x * y
    return elements
@njit
def multiply_rs_matrix(indices, xs, A):
    """Matrix multiplication of SimpleSparse object ('indices' and 'xs') and matrix A.
    Much more computationally demanding than multiplying two SimpleSparse (which is almost
    free with simple analytical formula), so we implement as jitted function.

    indices is an (n, 2) int array of basis pairs (i, m); xs is the matching
    coefficient array; A is (T, S). Returns the (T, S) product.
    """
    n = indices.shape[0]
    T = A.shape[0]
    S = A.shape[1]
    Aout = np.zeros((T, S))
    for count in range(n):
        # for Numba to jit easily, SimpleSparse with basis elements '(i, m)' with coefs 'x'
        # was stored in 'indices' and 'xs'
        i = indices[count, 0]
        m = indices[count, 1]
        x = xs[count]
        # loop faster than vectorized when jitted
        # directly use def of basis element (i, m), displacement of i and ignore first m
        if i == 0:
            for t in range(m, T):
                for s in range(S):
                    Aout[t, s] += x * A[t, s]
        elif i > 0:
            for t in range(m, T - i):
                for s in range(S):
                    Aout[t, s] += x * A[t + i, s]
        else:
            for t in range(m - i, T):
                for s in range(S):
                    Aout[t, s] += x * A[t + i, s]
    return Aout
def make_matrix(A, T):
    """If A is not an outright ndarray, e.g. it is SimpleSparse, call its .matrix(T) method
    to convert it to T*T array."""
    if isinstance(A, np.ndarray):
        return A
    return A.matrix(T)
| 10,126 | 34.041522 | 106 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/classes/__init__.py | from .steady_state_dict import SteadyStateDict, UserProvidedSS
from .impulse_dict import ImpulseDict
from .jacobian_dict import JacobianDict, FactoredJacobianDict
from .sparse_jacobians import IdentityMatrix, SimpleSparse
| 222 | 43.6 | 62 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/classes/impulse_dict.py | """ImpulseDict class for manipulating impulse responses."""
import numpy as np
from .result_dict import ResultDict
from ..utilities.ordered_set import OrderedSet
from ..utilities.bijection import Bijection
from .steady_state_dict import SteadyStateDict
class ImpulseDict(ResultDict):
    """Dict of length-T impulse-response paths with elementwise arithmetic,
    packing/unpacking to flat vectors, and zero-default lookup via get()."""

    def __init__(self, data, internals=None, T=None):
        if isinstance(data, ImpulseDict):
            # copy-construct: internals/T must not be supplied separately
            if internals is not None or T is not None:
                raise ValueError('Supplying ImpulseDict and also internal or T to constructor not allowed')
            super().__init__(data)
            self.T = data.T
        else:
            if not isinstance(data, dict):
                raise ValueError('ImpulseDicts are initialized with a `dict` of top-level impulse responses.')
            super().__init__(data, internals)
            # infer common horizon from the data when not given explicitly
            self.T = (T if T is not None else self.infer_length())

    def __getitem__(self, k):
        # pass T through so iterable-key lookups build a consistent ImpulseDict
        return super().__getitem__(k, T=self.T)

    def __add__(self, other):
        return self.binary_operation(other, lambda a, b: a + b)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        return self.binary_operation(other, lambda a, b: a - b)

    def __rsub__(self, other):
        return self.binary_operation(other, lambda a, b: b - a)

    def __mul__(self, other):
        return self.binary_operation(other, lambda a, b: a * b)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        return self.binary_operation(other, lambda a, b: a / b)

    def __rtruediv__(self, other):
        return self.binary_operation(other, lambda a, b: b / a)

    def __neg__(self):
        return self.unary_operation(lambda a: -a)

    def __pos__(self):
        return self

    def __abs__(self):
        return self.unary_operation(lambda a: abs(a))

    def binary_operation(self, other, op):
        """Apply op(v, other) elementwise over toplevel and internals.

        Raises TypeError for unsupported operand types.
        """
        if isinstance(other, (SteadyStateDict, ImpulseDict)):
            toplevel = {k: op(v, other[k]) for k, v in self.toplevel.items()}
            internals = {}
            for b in self.internals:
                other_internals = other.internals[b]
                internals[b] = {k: op(v, other_internals[k]) for k, v in self.internals[b].items()}
            return ImpulseDict(toplevel, internals, self.T)
        elif isinstance(other, (float, int)):
            toplevel = {k: op(v, other) for k, v in self.toplevel.items()}
            internals = {}
            for b in self.internals:
                internals[b] = {k: op(v, other) for k, v in self.internals[b].items()}
            return ImpulseDict(toplevel, internals, self.T)
        else:
            # bug fix: previously *returned* a NotImplementedError instance instead of raising
            raise TypeError(f'Can only perform operations with ImpulseDicts and other ImpulseDicts, SteadyStateDicts, or numbers, not {type(other).__name__}')

    def unary_operation(self, op):
        """Apply op(v) elementwise over toplevel and internals."""
        toplevel = {k: op(v) for k, v in self.toplevel.items()}
        internals = {}
        for b in self.internals:
            internals[b] = {k: op(v) for k, v in self.internals[b].items()}
        return ImpulseDict(toplevel, internals, self.T)

    def pack(self):
        """Concatenate all top-level paths into one flat length-(T * n) vector."""
        T = self.T
        bigv = np.empty(T*len(self.toplevel))
        for i, v in enumerate(self.toplevel.values()):
            bigv[i*T:(i+1)*T] = v
        return bigv

    @staticmethod
    def unpack(bigv, outputs, T):
        """Inverse of pack(): split a flat vector into named length-T paths."""
        impulse = {}
        for i, o in enumerate(outputs):
            impulse[o] = bigv[i*T:(i+1)*T]
        return ImpulseDict(impulse, T=T)

    def infer_length(self):
        """Common length of all top-level paths; raises if they disagree."""
        lengths = [len(v) for v in self.toplevel.values()]
        length = max(lengths)
        if length != min(lengths):
            raise ValueError(f'Building ImpulseDict with inconsistent lengths {max(lengths)} and {min(lengths)}')
        return length

    def get(self, k):
        """Like __getitem__ but with default of zero impulse"""
        if isinstance(k, str):
            return self.toplevel.get(k, np.zeros(self.T))
        elif isinstance(k, tuple):
            raise TypeError(f'Key {k} to {type(self).__name__} cannot be tuple')
        else:
            try:
                return type(self)({ki: self.toplevel.get(ki, np.zeros(self.T)) for ki in k}, T=self.T)
            except TypeError:
                raise TypeError(f'Key {k} to {type(self).__name__} needs to be a string or an iterable (list, set, etc) of strings')
| 4,427 | 37.172414 | 169 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/examples/krusell_smith.py | from sequence_jacobian import grids, simple, create_model, hetblocks
hh = hetblocks.hh_sim.hh
'''Part 1: Blocks'''
@simple
def firm(K, L, Z, alpha, delta):
    """Cobb-Douglas firm block: rental rate r, wage w, and output Y.

    K(-1) is the sequence-jacobian simple-block syntax for the one-period lag of K.
    """
    r = alpha * Z * (K(-1) / L) ** (alpha-1) - delta
    w = (1 - alpha) * Z * (K(-1) / L) ** alpha
    Y = Z * K(-1) ** alpha * L ** (1 - alpha)
    return r, w, Y
@simple
def mkt_clearing(K, A, Y, C, delta):
    """Market-clearing block: asset and goods residuals plus implied investment."""
    asset_mkt = A - K
    I = K - (1 - delta) * K(-1)
    goods_mkt = Y - C - I
    return asset_mkt, goods_mkt, I
@simple
def firm_ss(r, Y, L, delta, alpha):
    '''Solve for (Z, K) given targets for (Y, r).'''
    rk = r + delta
    # invert the FOC r + delta = alpha * Y / K
    K = alpha * Y / rk
    Z = Y / K ** alpha / L ** (1 - alpha)
    w = (1 - alpha) * Z * (K / L) ** alpha
    return K, Z, w
'''Part 2: Embed HA block'''
def make_grids(rho, sigma, nS, amax, nA):
    """Hetinput: discretized income process (Rouwenhorst) and asset grid."""
    e_grid, _, Pi = grids.markov_rouwenhorst(rho=rho, sigma=sigma, N=nS)
    a_grid = grids.agrid(amax=amax, n=nA)
    return e_grid, Pi, a_grid
def income(w, e_grid):
    """Hetinput: labor income for each productivity type at wage w."""
    return w * e_grid
'''Part 3: DAG'''
def dag():
    """Assemble the Krusell-Smith model.

    Returns (ss_model, ss, model, unknowns, targets, inputs) for use in
    steady-state and transitional-dynamics computations.
    """
    # Combine blocks
    household = hh.add_hetinputs([income, make_grids])
    ks_model = create_model([household, firm, mkt_clearing], name="Krusell-Smith")
    ks_model_ss = create_model([household, firm_ss, mkt_clearing], name="Krusell-Smith SS")
    # Steady state
    calibration = {'eis': 1.0, 'delta': 0.025, 'alpha': 0.11, 'rho': 0.966, 'sigma': 0.5,
                   'Y': 1.0, 'L': 1.0, 'nS': 2, 'nA': 10, 'amax': 200, 'r': 0.01}
    unknowns_ss = {'beta': (0.98 / 1.01, 0.999 / 1.01)}
    targets_ss = {'asset_mkt': 0.}
    ss = ks_model_ss.solve_steady_state(calibration, unknowns_ss, targets_ss, solver='brentq')
    # Transitional dynamics
    inputs = ['Z']
    unknowns = ['K']
    targets = ['asset_mkt']
    return ks_model_ss, ss, ks_model, unknowns, targets, inputs
'''Part 3: Permanent beta heterogeneity'''
@simple
def aggregate(A_patient, A_impatient, C_patient, C_impatient, mass_patient):
    """Population-weighted aggregation of the two permanent-beta household types."""
    C = mass_patient * C_patient + (1 - mass_patient) * C_impatient
    A = mass_patient * A_patient + (1 - mass_patient) * A_impatient
    return C, A
def remapped_dag():
    """Assemble the Krusell-Smith model with permanent patient/impatient beta types.

    NOTE(review): the return tuple includes `ss` twice and differs in shape from
    dag()'s return — confirm intended caller contract.
    NOTE(review): uses `hh.household.add_hetinputs` whereas dag() uses
    `hh.add_hetinputs` — verify the attribute path is correct.
    """
    # Create 2 versions of the household block using `remap`
    household = hh.household.add_hetinputs([income, make_grids])
    to_map = ['beta', *household.outputs]
    hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('hh_patient')
    hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('hh_impatient')
    blocks = [hh_patient, hh_impatient, firm, mkt_clearing, aggregate]
    blocks_ss = [hh_patient, hh_impatient, firm_ss, mkt_clearing, aggregate]
    ks_remapped = create_model(blocks, name='KS-beta-het')
    ks_remapped_ss = create_model(blocks_ss, name='KS-beta-het')
    # Steady State
    calibration = {'eis': 1., 'delta': 0.025, 'alpha': 0.3, 'rho': 0.966, 'sigma': 0.5, 'Y': 1.0, 'L': 1.0,
                   'nS': 3, 'nA': 100, 'amax': 1000, 'beta_impatient': 0.985, 'mass_patient': 0.5}
    unknowns_ss = {'beta_patient': (0.98 / 1.01, 0.999 / 1.01)}
    targets_ss = {'asset_mkt': 0.}
    ss = ks_remapped_ss.solve_steady_state(calibration, unknowns_ss, targets_ss, solver='brentq')
    # Transitional Dynamics/Jacobian Calculation
    unknowns = ['K']
    targets = ['asset_mkt']
    exogenous = ['Z']
    return ks_remapped_ss, ss, ks_remapped, unknowns, targets, ss, exogenous
| 3,386 | 32.205882 | 107 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/examples/hank.py | import numpy as np
from sequence_jacobian import grids, simple, create_model, hetblocks
hh = hetblocks.hh_labor.hh
'''Part 1: Blocks'''
@simple
def firm(Y, w, Z, pi, mu, kappa):
    """Labor demand and dividends net of Rotemberg price-adjustment costs."""
    L = Y / Z
    Div = Y - w * L - mu/(mu-1)/(2*kappa) * (1+pi).apply(np.log)**2 * Y
    return L, Div
@simple
def monetary(pi, rstar, phi):
    """Ex-post real rate implied by last period's Taylor rule and current inflation."""
    r = (1 + rstar(-1) + phi * pi(-1)) / (1 + pi) - 1
    return r
@simple
def nkpc(pi, w, Z, Y, r, mu, kappa):
    """Residual of the New Keynesian Phillips curve (zero in equilibrium)."""
    nkpc_res = kappa * (w / Z - 1 / mu) + Y(+1) / Y * (1 + pi(+1)).apply(np.log) / (1 + r(+1))\
               - (1 + pi).apply(np.log)
    return nkpc_res
@simple
def fiscal(r, B):
    """Lump-sum taxes finance interest on government debt period by period."""
    Tax = r * B
    return Tax
@simple
def mkt_clearing(A, NE, C, L, Y, B, pi, mu, kappa):
    """Asset, labor, and goods market clearing residuals."""
    asset_mkt = A - B
    labor_mkt = NE - L
    goods_mkt = Y - C - mu/(mu-1)/(2*kappa) * (1+pi).apply(np.log)**2 * Y
    return asset_mkt, labor_mkt, goods_mkt
@simple
def nkpc_ss(Z, mu):
    '''Solve (w) to hit targets for (nkpc_res)'''
    # with pi = 0 in steady state, the NKPC reduces to w = Z / mu
    w = Z / mu
    return w
'''Part 2: Embed HA block'''
def make_grids(rho_s, sigma_s, nS, amax, nA):
    """Discretize the skill process (Rouwenhorst) and build the asset grid."""
    e_grid, pi_e, Pi = grids.markov_rouwenhorst(rho=rho_s, sigma=sigma_s, N=nS)
    a_grid = grids.agrid(amax=amax, n=nA)
    return e_grid, pi_e, Pi, a_grid
def transfers(pi_e, Div, Tax, e_grid):
    """Net transfers per skill type: dividends minus taxes, both incident in proportion to skill.

    The incidence rules are hardwired to e_grid; only their relative scale across
    types matters since each rule is normalized by its population mean under pi_e.
    """
    incidence = e_grid  # common rule for both dividends and taxes
    normalization = np.sum(pi_e * incidence)
    div = incidence * (Div / normalization)
    tax = incidence * (Tax / normalization)
    T = div - tax
    return T
def wages(w, e_grid):
    """Effective wage per skill type: aggregate wage scaled by productivity."""
    we = e_grid * w
    return we
def labor_supply(n, e_grid):
    """Effective labor supplied: hours n scaled by each type's productivity."""
    productivity = e_grid[:, np.newaxis]
    ne = n * productivity
    return ne
'''Part 3: DAG'''
def dag():
    """Assemble the one-asset HANK DAG, solve its steady state, and return
    (ss DAG, steady state, dynamic DAG, unknowns, targets, exogenous)."""
    # Combine blocks
    household = hh.add_hetinputs([transfers, wages, make_grids])
    household = household.add_hetoutputs([labor_supply])
    blocks = [household, firm, monetary, fiscal, mkt_clearing, nkpc]
    blocks_ss = [household, firm, monetary, fiscal, mkt_clearing, nkpc_ss]
    hank_model = create_model(blocks, name="One-Asset HANK")
    hank_model_ss = create_model(blocks_ss, name="One-Asset HANK SS")
    # Steady state
    calibration = {'r': 0.005, 'rstar': 0.005, 'eis': 0.5, 'frisch': 0.5, 'B': 5.6,
                   'mu': 1.2, 'rho_s': 0.966, 'sigma_s': 0.5, 'kappa': 0.1, 'phi': 1.5,
                   'Y': 1., 'Z': 1., 'pi': 0., 'nS': 2, 'amax': 150, 'nA': 10}
    unknowns_ss = {'beta': 0.986, 'vphi': 0.8}
    targets_ss = {'asset_mkt': 0., 'NE': 1.}
    cali = hank_model_ss.solve_steady_state(calibration, unknowns_ss, targets_ss,
                                            solver='broyden_custom')
    ss = hank_model.steady_state(cali)
    # Transitional dynamics
    unknowns = ['w', 'Y', 'pi']
    targets = ['asset_mkt', 'goods_mkt', 'nkpc_res']
    exogenous = ['rstar', 'Z']
    return hank_model_ss, ss, hank_model, unknowns, targets, exogenous
| 2,880 | 26.179245 | 95 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/examples/rbc.py | from sequence_jacobian import simple, create_model
'''Part 1: Blocks'''
@simple
def firm(K, L, Z, alpha, delta):
    """Factor prices and output from Cobb-Douglas production with lagged capital."""
    r = alpha * Z * (K(-1) / L) ** (alpha-1) - delta
    w = (1 - alpha) * Z * (K(-1) / L) ** alpha
    Y = Z * K(-1) ** alpha * L ** (1 - alpha)
    return r, w, Y
@simple
def household(K, L, w, eis, frisch, vphi, delta):
    """Consumption from the intratemporal FOC and investment from the capital LOM."""
    C = (w / vphi / L ** (1 / frisch)) ** eis
    I = K - (1 - delta) * K(-1)
    return C, I
@simple
def mkt_clearing(r, C, Y, I, K, L, w, eis, beta):
    """Goods market, Euler equation, and Walras-check residuals."""
    goods_mkt = Y - C - I
    euler = C ** (-1 / eis) - beta * (1 + r(+1)) * C(+1) ** (-1 / eis)
    walras = C + K - (1 + r) * K(-1) - w * L
    return goods_mkt, euler, walras
'''Part 2: Assembling the model'''
def dag():
    """Assemble the RBC DAG, solve its steady state, and return
    (model, steady state, unknowns, targets, exogenous)."""
    # Combine blocks
    blocks = [household, firm, mkt_clearing]
    rbc_model = create_model(blocks, name="RBC")
    # Steady state
    calibration = {'eis': 1., 'frisch': 1., 'delta': 0.025, 'alpha': 0.11, 'L': 1.}
    unknowns_ss = {'vphi': 0.92, 'beta': 1 / (1 + 0.01), 'K': 2., 'Z': 1.}
    targets_ss = {'goods_mkt': 0., 'r': 0.01, 'euler': 0., 'Y': 1.}
    ss = rbc_model.solve_steady_state(calibration, unknowns_ss, targets_ss, solver='hybr')
    # Transitional dynamics
    unknowns = ['K', 'L']
    targets = ['goods_mkt', 'euler']
    exogenous = ['Z']
    return rbc_model, ss, unknowns, targets, exogenous
| 1,345 | 27.041667 | 90 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/examples/__init__.py | """Example models""" | 20 | 20 | 20 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/examples/two_asset.py | import numpy as np
from sequence_jacobian import simple, solved, combine, create_model, grids, hetblocks
hh = hetblocks.hh_twoasset.hh
'''Part 1: Blocks'''
@simple
def pricing(pi, mc, r, Y, kappap, mup):
    """Price New Keynesian Phillips curve residual."""
    nkpc = kappap * (mc - 1 / mup) + Y(+1) / Y * (1 + pi(+1)).apply(np.log) \
           / (1 + r(+1)) - (1 + pi).apply(np.log)
    return nkpc
@simple
def arbitrage(div, p, r):
    """No-arbitrage condition between equity and the risk-free rate."""
    equity = div(+1) + p(+1) - p * (1 + r(+1))
    return equity
@simple
def labor(Y, w, K, Z, alpha):
    """Labor demand N and real marginal cost mc implied by the production function."""
    N = (Y / Z / K(-1) ** alpha) ** (1 / (1 - alpha))
    mc = w * N / (1 - alpha) / Y
    return N, mc
@simple
def investment(Q, K, r, N, mc, Z, delta, epsI, alpha):
    """Q-theory investment FOC (inv) and capital valuation equation (val)."""
    inv = (K / K(-1) - 1) / (delta * epsI) + 1 - Q
    val = alpha * Z(+1) * (N(+1) / K) ** (1 - alpha) * mc(+1) -\
        (K(+1) / K - (1 - delta) + (K(+1) / K - 1) ** 2 / (2 * delta * epsI)) +\
        K(+1) / K * Q(+1) - (1 + r(+1)) * Q
    return inv, val
@simple
def dividend(Y, w, N, K, pi, mup, kappap, delta, epsI):
    """Dividends net of the wage bill, investment, capital-adjustment and price-adjustment costs."""
    psip = mup / (mup - 1) / 2 / kappap * (1 + pi).apply(np.log) ** 2 * Y
    k_adjust = K(-1) * (K / K(-1) - 1) ** 2 / (2 * delta * epsI)
    I = K - (1 - delta) * K(-1) + k_adjust
    div = Y - w * N - I - psip
    return psip, I, div
@simple
def taylor(rstar, pi, phi):
    """Nominal rate from a simple Taylor rule."""
    i = rstar + phi * pi
    return i
@simple
def fiscal(r, w, N, G, Bg):
    """Labor tax rate that finances debt service and government spending."""
    tax = (r * Bg + G) / w / N
    return tax
@simple
def finance(i, p, pi, r, div, omega, pshare):
    """Liquid return rb, illiquid portfolio return ra, and the Fisher-equation residual."""
    rb = r - omega
    ra = pshare(-1) * (div + p) / p(-1) + (1 - pshare(-1)) * (1 + r) - 1
    fisher = 1 + i(-1) - (1 + r) * (1 + pi)
    return rb, ra, fisher
@simple
def wage(pi, w):
    """Wage inflation implied by price inflation and real wage growth."""
    piw = (1 + pi) * w / w(-1) - 1
    return piw
@simple
def union(piw, N, tax, w, UCE, kappaw, muw, vphi, frisch, beta):
    """Wage Phillips curve residual from union wage setting."""
    wnkpc = kappaw * (vphi * N ** (1 + 1 / frisch) - (1 - tax) * w * N * UCE / muw) + beta * \
            (1 + piw(+1)).apply(np.log) - (1 + piw).apply(np.log)
    return wnkpc
@simple
def mkt_clearing(p, A, B, Bg, C, I, G, CHI, psip, omega, Y):
    """Asset and goods market clearing residuals, plus total household wealth."""
    wealth = A + B
    asset_mkt = p + Bg - wealth
    goods_mkt = C + I + G + CHI + psip + omega * B - Y
    return asset_mkt, wealth, goods_mkt
@simple
def share_value(p, tot_wealth, Bh):
    """Equity share of illiquid wealth."""
    pshare = p / (tot_wealth - Bh)
    return pshare
@solved(unknowns={'pi': (-0.1, 0.1)}, targets=['nkpc'], solver="brentq")
def pricing_solved(pi, mc, r, Y, kappap, mup):
    """Price NKPC embedded as a solved block: pi is solved internally to zero nkpc."""
    nkpc = kappap * (mc - 1 / mup) + Y(+1) / Y * (1 + pi(+1)).apply(np.log) / \
           (1 + r(+1)) - (1 + pi).apply(np.log)
    return nkpc
@solved(unknowns={'p': (5, 15)}, targets=['equity'], solver="brentq")
def arbitrage_solved(div, p, r):
    """Equity pricing embedded as a solved block: p is solved internally to zero equity."""
    equity = div(+1) + p(+1) - p * (1 + r(+1))
    return equity
@simple
def partial_ss(Y, N, K, r, tot_wealth, Bg, delta):
    """Solves for (mup, alpha, Z, w) to hit (tot_wealth, Y, K, pi)."""
    # 1. Solve for markup to hit total wealth
    p = tot_wealth - Bg
    mc = 1 - r * (p - K) / Y
    mup = 1 / mc
    # 2. Solve for capital share to hit K
    alpha = (r + delta) * K / Y / mc
    # 3. Solve for TFP to hit Y
    Z = Y * K ** (-alpha) * N ** (alpha - 1)
    # 4. Solve for w such that piw = 0
    w = mc * (1 - alpha) * Y / N
    return p, mc, mup, alpha, Z, w
@simple
def union_ss(tax, w, UCE, N, muw, frisch):
    """Solves for (vphi) to hit (wnkpc)."""
    # vphi chosen so the wage Phillips curve residual is exactly zero in steady state
    vphi = (1 - tax) * w * UCE / muw / N ** (1 + 1 / frisch)
    wnkpc = vphi * N ** (1 + 1 / frisch) - (1 - tax) * w * UCE / muw
    return vphi, wnkpc
'''Part 2: Embed HA block'''
def make_grids(bmax, amax, kmax, nB, nA, nK, nZ, rho_z, sigma_z):
    """Build liquid/illiquid asset grids, the kappa grid, and the discretized skill process."""
    b_grid = grids.agrid(amax=bmax, n=nB)
    a_grid = grids.agrid(amax=amax, n=nA)
    # k_grid is reversed (descending) and copied to be contiguous
    k_grid = grids.agrid(amax=kmax, n=nK)[::-1].copy()
    e_grid, _, Pi = grids.markov_rouwenhorst(rho=rho_z, sigma=sigma_z, N=nZ)
    return b_grid, a_grid, k_grid, e_grid, Pi
def income(e_grid, tax, w, N):
    """After-tax labor income by skill type."""
    after_tax_earnings = (1 - tax) * w * N
    z_grid = e_grid * after_tax_earnings
    return z_grid
'''Part 3: DAG'''
def dag():
    """Assemble the two-asset HANK DAG, solve its steady state, and return
    (ss DAG, steady state, dynamic DAG, unknowns, targets, exogenous)."""
    # Combine Blocks
    household = hh.add_hetinputs([income, make_grids])
    production = combine([labor, investment])
    production_solved = production.solved(unknowns={'Q': 1., 'K': 10.},
                                          targets=['inv', 'val'], solver='broyden_custom')
    blocks = [household, pricing_solved, arbitrage_solved, production_solved,
              dividend, taylor, fiscal, share_value, finance, wage, union, mkt_clearing]
    two_asset_model = create_model(blocks, name='Two-Asset HANK')
    # Steady-state DAG
    blocks_ss = [household, partial_ss,
                 dividend, taylor, fiscal, share_value, finance, union_ss, mkt_clearing]
    two_asset_model_ss = create_model(blocks_ss, name='Two-Asset HANK SS')
    # Steady State
    calibration = {'Y': 1., 'N': 1.0, 'K': 10., 'r': 0.0125, 'rstar': 0.0125, 'tot_wealth': 14,
                   'delta': 0.02, 'pi': 0.,
                   'kappap': 0.1, 'muw': 1.1, 'Bh': 1.04, 'Bg': 2.8, 'G': 0.2, 'eis': 0.5,
                   'frisch': 1, 'chi0': 0.25, 'chi2': 2, 'epsI': 4, 'omega': 0.005,
                   'kappaw': 0.1, 'phi': 1.5, 'nZ': 3, 'nB': 10, 'nA': 16, 'nK': 4,
                   'bmax': 50, 'amax': 4000, 'kmax': 1, 'rho_z': 0.966, 'sigma_z': 0.92}
    unknowns_ss = {'beta': 0.976, 'chi1': 6.5}
    targets_ss = {'asset_mkt': 0., 'B': 'Bh'}
    cali = two_asset_model_ss.solve_steady_state(calibration, unknowns_ss, targets_ss, solver='broyden_custom')
    ss = two_asset_model.steady_state(cali)
    # Transitional Dynamics/Jacobian Calculation
    unknowns = ['r', 'w', 'Y']
    targets = ['asset_mkt', 'fisher', 'wnkpc']
    exogenous = ['rstar', 'Z', 'G']
    return two_asset_model_ss, ss, two_asset_model, unknowns, targets, exogenous
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/hetblocks/hh_twoasset.py | import numpy as np
from numba import guvectorize
from ..blocks.het_block import het
from .. import interpolate
def hh_init(b_grid, a_grid, z_grid, eis):
    """Initial guess for marginal values Va, Vb on the (z, b, a) grid."""
    nZ = z_grid.shape[0]
    broadcast_z = np.ones((nZ, 1, 1))
    guess_a = 0.6 + 1.1 * b_grid[:, np.newaxis] + a_grid
    guess_b = 0.5 + b_grid[:, np.newaxis] + 1.2 * a_grid
    Va = guess_a ** (-1 / eis) * broadcast_z
    Vb = guess_b ** (-1 / eis) * broadcast_z
    return Va, Vb
def adjustment_costs(a, a_grid, ra, chi0, chi1, chi2):
    """Adjustment cost paid at the chosen illiquid-asset policy a (used as a hetoutput)."""
    chi = get_Psi_and_deriv(a, a_grid, ra, chi0, chi1, chi2)[0]
    return chi
def marginal_cost_grid(a_grid, ra, chi0, chi1, chi2):
    """Marginal adjustment cost Psi1 evaluated on the full (a', a) grid (used as a hetinput)."""
    # precompute Psi1(a', a) on grid of (a', a) for steps 3 and 5
    Psi1 = get_Psi_and_deriv(a_grid[:, np.newaxis],
                             a_grid[np.newaxis, :], ra, chi0, chi1, chi2)[1]
    return Psi1
# policy and backward order as in grid!
@het(exogenous='Pi', policy=['b', 'a'], backward=['Vb', 'Va'],
     hetinputs=[marginal_cost_grid], hetoutputs=[adjustment_costs], backward_init=hh_init)
def hh(Va_p, Vb_p, a_grid, b_grid, z_grid, e_grid, k_grid, beta, eis, rb, ra, chi0, chi1, chi2, Psi1):
    """Single backward EGM step for the two-asset household (liquid b, illiquid a)."""
    # === STEP 2: Wb(z, b', a') and Wa(z, b', a') ===
    # (take discounted expectation of tomorrow's value function)
    Wb = beta * Vb_p
    Wa = beta * Va_p
    W_ratio = Wa / Wb
    # === STEP 3: a'(z, b', a) for UNCONSTRAINED ===
    # for each (z, b', a), linearly interpolate to find a' between gridpoints
    # satisfying optimality condition W_ratio == 1+Psi1
    i, pi = lhs_equals_rhs_interpolate(W_ratio, 1 + Psi1)
    # use same interpolation to get Wb and then c
    a_endo_unc = interpolate.apply_coord(i, pi, a_grid)
    c_endo_unc = interpolate.apply_coord(i, pi, Wb) ** (-eis)
    # === STEP 4: b'(z, b, a), a'(z, b, a) for UNCONSTRAINED ===
    # solve out budget constraint to get b(z, b', a)
    b_endo = (c_endo_unc + a_endo_unc + addouter(-z_grid, b_grid, -(1 + ra) * a_grid)
              + get_Psi_and_deriv(a_endo_unc, a_grid, ra, chi0, chi1, chi2)[0]) / (1 + rb)
    # interpolate this b' -> b mapping to get b -> b', so we have b'(z, b, a)
    # and also use interpolation to get a'(z, b, a)
    # (note utils.interpolate.interpolate_coord and utils.interpolate.apply_coord work on last axis,
    # so we need to swap 'b' to the last axis, then back when done)
    i, pi = interpolate.interpolate_coord(b_endo.swapaxes(1, 2), b_grid)
    a_unc = interpolate.apply_coord(i, pi, a_endo_unc.swapaxes(1, 2)).swapaxes(1, 2)
    b_unc = interpolate.apply_coord(i, pi, b_grid).swapaxes(1, 2)
    # === STEP 5: a'(z, kappa, a) for CONSTRAINED ===
    # for each (z, kappa, a), linearly interpolate to find a' between gridpoints
    # satisfying optimality condition W_ratio/(1+kappa) == 1+Psi1, assuming b'=0
    lhs_con = W_ratio[:, 0:1, :] / (1 + k_grid[np.newaxis, :, np.newaxis])
    i, pi = lhs_equals_rhs_interpolate(lhs_con, 1 + Psi1)
    # use same interpolation to get Wb and then c
    a_endo_con = interpolate.apply_coord(i, pi, a_grid)
    c_endo_con = ((1 + k_grid[np.newaxis, :, np.newaxis]) ** (-eis)
                  * interpolate.apply_coord(i, pi, Wb[:, 0:1, :]) ** (-eis))
    # === STEP 6: a'(z, b, a) for CONSTRAINED ===
    # solve out budget constraint to get b(z, kappa, a), enforcing b'=0
    b_endo = (c_endo_con + a_endo_con
              + addouter(-z_grid, np.full(len(k_grid), b_grid[0]), -(1 + ra) * a_grid)
              + get_Psi_and_deriv(a_endo_con, a_grid, ra, chi0, chi1, chi2)[0]) / (1 + rb)
    # interpolate this kappa -> b mapping to get b -> kappa
    # then use the interpolated kappa to get a', so we have a'(z, b, a)
    # (utils.interpolate.interpolate_y does this in one swoop, but since it works on last
    # axis, we need to swap kappa to last axis, and then b back to middle when done)
    a_con = interpolate.interpolate_y(b_endo.swapaxes(1, 2), b_grid,
                                      a_endo_con.swapaxes(1, 2)).swapaxes(1, 2)
    # === STEP 7: obtain policy functions and update derivatives of value function ===
    # combine unconstrained solution and constrained solution, choosing latter
    # when unconstrained goes below minimum b
    a, b = a_unc.copy(), b_unc.copy()
    b[b <= b_grid[0]] = b_grid[0]
    a[b <= b_grid[0]] = a_con[b <= b_grid[0]]
    # calculate adjustment cost and its derivative
    Psi, _, Psi2 = get_Psi_and_deriv(a, a_grid, ra, chi0, chi1, chi2)
    # solve out budget constraint to get consumption and marginal utility
    c = addouter(z_grid, (1 + rb) * b_grid, (1 + ra) * a_grid) - Psi - a - b
    uc = c ** (-1 / eis)
    # skill-weighted marginal utility, aggregated to UCE upstream
    uce = e_grid[:, np.newaxis, np.newaxis] * uc
    # update derivatives of value function using envelope conditions
    Va = (1 + ra - Psi2) * uc
    Vb = (1 + rb) * uc
    return Va, Vb, a, b, c, uce
'''Supporting functions for HA block'''
def get_Psi_and_deriv(ap, a, ra, chi0, chi1, chi2):
    """Adjustment cost Psi(ap, a) and its partial derivatives.

    Psi1 is the derivative with respect to the first argument (ap),
    Psi2 the derivative with respect to the second argument (a).
    """
    base = (1 + ra) * a          # illiquid position after returns
    deposit = ap - base          # net flow into the illiquid account
    denom = base + chi0          # scaling in the cost function's denominator
    core = (np.abs(deposit) / denom) ** (chi2 - 1)
    Psi = chi1 / chi2 * np.abs(deposit) * core
    Psi1 = chi1 * np.sign(deposit) * core
    Psi2 = -(1 + ra) * (Psi1 + (chi2 - 1) * Psi / denom)
    return Psi, Psi1, Psi2
def matrix_times_first_dim(A, X):
    """Apply matrix A along the first axis of X, leaving the other axes intact.

    Gives the same result as A @ X when X is 1D or 2D.
    """
    # collapse trailing axes, multiply, then restore the original shape
    flat = X.reshape(X.shape[0], -1)
    return (A @ flat).reshape(X.shape)
def addouter(z, b, a):
    """Outer sum of three vectors: result[i, j, k] = z[i] + b[j] + a[k]."""
    return z[:, np.newaxis, np.newaxis] + b[np.newaxis, :, np.newaxis] + a[np.newaxis, np.newaxis, :]
@guvectorize(['void(float64[:], float64[:,:], uint32[:], float64[:])'], '(ni),(ni,nj)->(nj),(nj)')
def lhs_equals_rhs_interpolate(lhs, rhs, iout, piout):
    """
    Given lhs (i) and rhs (i,j), for each j, find the i such that
    lhs[i] > rhs[i,j] and lhs[i+1] < rhs[i+1,j]
    i.e. where given j, lhs == rhs in between i and i+1.
    Also return the pi such that
    pi*(lhs[i] - rhs[i,j]) + (1-pi)*(lhs[i+1] - rhs[i+1,j]) == 0
    i.e. such that the point at pi*i + (1-pi)*(i+1) satisfies lhs == rhs by linear interpolation.
    If lhs[0] < rhs[0,j] already, just return u=0 and pi=1.
    ***IMPORTANT: Assumes that solution i is monotonically increasing in j
    and that lhs - rhs is monotonically decreasing in i.***
    """
    ni, nj = rhs.shape
    assert len(lhs) == ni
    i = 0
    for j in range(nj):
        while True:
            if lhs[i] < rhs[i, j]:
                break
            # BUGFIX: i indexes the length-ni first axis, so the bound is ni - 1.
            # The old bound (nj - 1) was only safe because callers pass square rhs.
            elif i < ni - 1:
                i += 1
            else:
                break
        if i == 0:
            iout[j] = 0
            piout[j] = 1
        else:
            iout[j] = i - 1
            err_upper = rhs[i, j] - lhs[i]
            err_lower = rhs[i - 1, j] - lhs[i - 1]
            piout[j] = err_upper / (err_upper - err_lower)
| 7,161 | 38.351648 | 102 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/hetblocks/hh_labor.py | '''Standard Incomplete Market model with Endogenous Labor Supply'''
import numpy as np
from numba import vectorize, njit
from ..blocks.het_block import het
from .. import interpolate
def hh_init(a_grid, we, r, eis, T):
    """Initial guess for financial income and the marginal value of assets Va."""
    gross_assets = (1 + r) * a_grid
    fininc = gross_assets + T[:, np.newaxis] - a_grid[0]
    coh = gross_assets[np.newaxis, :] + we[:, np.newaxis] + T[:, np.newaxis]
    Va = (1 + r) * (0.1 * coh) ** (-1 / eis)
    return fininc, Va
@het(exogenous='Pi', policy='a', backward='Va', backward_init=hh_init)
def hh(Va_p, a_grid, we, T, r, beta, eis, frisch, vphi):
    '''Single backward step via EGM.'''
    uc_nextgrid = beta * Va_p
    # optimal c and n on the endogenous (next-period asset) grid
    c_nextgrid, n_nextgrid = cn(uc_nextgrid, we[:, np.newaxis], eis, frisch, vphi)
    lhs = c_nextgrid - we[:, np.newaxis] * n_nextgrid + a_grid[np.newaxis, :] - T[:, np.newaxis]
    rhs = (1 + r) * a_grid
    c = interpolate.interpolate_y(lhs, rhs, c_nextgrid)
    n = interpolate.interpolate_y(lhs, rhs, n_nextgrid)
    a = rhs + we[:, np.newaxis] * n + T[:, np.newaxis] - c
    # where the borrowing constraint binds, pin a at the grid minimum and
    # re-solve the intratemporal (c, n) problem with a' = a_grid[0]
    iconst = np.nonzero(a < a_grid[0])
    a[iconst] = a_grid[0]
    if iconst[0].size != 0 and iconst[1].size != 0:
        c[iconst], n[iconst] = solve_cn(we[iconst[0]],
                                        rhs[iconst[1]] + T[iconst[0]] - a_grid[0],
                                        eis, frisch, vphi, Va_p[iconst])
    Va = (1 + r) * c ** (-1 / eis)
    return Va, a, c, n
'''Supporting functions for HA block'''
@njit
def cn(uc, w, eis, frisch, vphi):
    """Return optimal c, n as function of u'(c) given parameters"""
    # c from CRRA inversion; n from the intratemporal FOC w*u'(c) = vphi*n^(1/frisch)
    return uc ** (-eis), (w * uc / vphi) ** frisch
def solve_cn(w, T, eis, frisch, vphi, uc_seed):
    """Solve the constrained intratemporal problem for (c, n), seeding Newton with uc_seed."""
    uc = solve_uc(w, T, eis, frisch, vphi, uc_seed)
    return cn(uc, w, eis, frisch, vphi)
@vectorize
def solve_uc(w, T, eis, frisch, vphi, uc_seed):
    """Solve for optimal uc given in log uc space.
    max_{c, n} c**(1-1/eis) + vphi*n**(1+1/frisch) s.t. c = w*n + T
    """
    # Newton iteration on net expenditure in log u'(c) space
    log_uc = np.log(uc_seed)
    for i in range(30):
        ne, ne_p = netexp(log_uc, w, T, eis, frisch, vphi)
        if abs(ne) < 1E-11:
            break
        else:
            log_uc -= ne / ne_p
    else:
        raise ValueError("Cannot solve constrained household's problem: No convergence after 30 iterations!")
    return np.exp(log_uc)
@njit
def netexp(log_uc, w, T, eis, frisch, vphi):
    """Return net expenditure as a function of log uc and its derivative."""
    c, n = cn(np.exp(log_uc), w, eis, frisch, vphi)
    ne = c - w * n - T
    # c and n have elasticities of -eis and frisch wrt log u'(c)
    c_loguc = -eis * c
    n_loguc = frisch * n
    netexp_loguc = c_loguc - w * n_loguc
    return ne, netexp_loguc
| 2,678 | 30.151163 | 109 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/hetblocks/hh_sim.py | '''Standard Incomplete Market model'''
import numpy as np
from ..blocks.het_block import het
from .. import interpolate, misc, grids
'''Core HetBlock'''
def hh_init(a_grid, y, r, eis):
    """Initial guess for the marginal value of assets Va on the (e, a) grid."""
    cash_on_hand = y[:, np.newaxis] + (1 + r) * a_grid[np.newaxis, :]
    Va = (1 + r) * (0.1 * cash_on_hand) ** (-1 / eis)
    return Va
@het(exogenous='Pi', policy='a', backward='Va', backward_init=hh_init)
def hh(Va_p, a_grid, y, r, beta, eis):
    """Single backward EGM step for the standard incomplete-markets household."""
    uc_nextgrid = beta * Va_p
    c_nextgrid = uc_nextgrid ** (-eis)
    coh = (1 + r) * a_grid[np.newaxis, :] + y[:, np.newaxis]
    a = interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid)
    misc.setmin(a, a_grid[0])  # enforce the borrowing constraint a >= a_grid[0]
    c = coh - a
    Va = (1 + r) * c ** (-1 / eis)
    return Va, a, c
'''Extended HetBlock with grid and income process inputs added, and example calibration'''
def make_grids(rho_e, sd_e, n_e, min_a, max_a, n_a):
    """Discretize the income process (Rouwenhorst) and build the asset grid."""
    e_grid, _, Pi = grids.markov_rouwenhorst(rho_e, sd_e, n_e)
    a_grid = grids.asset_grid(min_a, max_a, n_a)
    return e_grid, Pi, a_grid
def income(w, e_grid):
    """Labor income per productivity state at wage w."""
    y = e_grid * w
    return y
hh_extended = hh.add_hetinputs([income, make_grids])
def example_calibration():
    """Baseline quarterly calibration for the extended SIM household block."""
    grid_params = dict(min_a=0, max_a=1000, rho_e=0.975, sd_e=0.7, n_a=200, n_e=7)
    price_params = dict(w=1, r=0.01 / 4, beta=1 - 0.08 / 4, eis=1)
    return {**grid_params, **price_params}
| 1,283 | 26.319149 | 90 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/hetblocks/__init__.py | '''Heterogeneous agent blocks'''
from . import hh_labor, hh_sim, hh_twoasset
| 77 | 25 | 43 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/discretize.py | """Grids and Markov chains"""
import numpy as np
from scipy.stats import norm
def asset_grid(amin, amax, n):
    """Double-exponentially spaced grid of n points from amin to amax.

    Points are uniform in u-space, then mapped through
    a = amin + exp(exp(u) - 1) - 1, which concentrates gridpoints near amin.
    """
    # upper bound in u-space chosen so the last point maps exactly onto amax
    top = np.log(1 + np.log(1 + amax - amin))
    uniform = np.linspace(0, top, n)
    return amin + np.exp(np.exp(uniform) - 1) - 1
def agrid(amax, n, amin=0):
    """Grid of n points from amin to amax, equidistant in logs around a shifted pivot."""
    shift = np.abs(amin) + 0.25
    grid = np.geomspace(amin + shift, amax + shift, n) - shift
    grid[0] = amin  # eliminate floating-point error at the lower endpoint
    return grid
# TODO: Temporarily include the old way of constructing grids from ikc_old for comparability of results
def agrid_old(amax, N, amin=0, frac=1/25):
    """Legacy log-spaced grid of N points between amin and amax.

    The pivot is chosen so that a fraction `frac` of the log-space between
    log(1+amin) and log(1+amax) lies beneath it. Kept only for comparability
    with older results; prefer `agrid`/`asset_grid`.
    """
    apivot = (1+amin)**(1-frac)*(1+amax)**frac - 1
    a = np.geomspace(amin+apivot,amax+apivot,N) - apivot
    a[0] = amin
    return a
def nonlinspace(amax, n, phi, amin=0):
    """Grid of n points from amin to amax; phi=1 is equidistant, phi>1 packs points near amin.

    This extra flexibility may be useful in non-convex problems in which policy
    functions have nonlinear (even non-monotonic) sections far from the
    borrowing limit.
    """
    grid = np.zeros(n)
    grid[0] = amin
    for j in range(1, n):
        remaining = amax - grid[j - 1]
        grid[j] = grid[j - 1] + remaining / (n - j) ** phi
    return grid
def stationary(Pi, pi_seed=None, tol=1E-11, maxit=10_000):
    """Invariant distribution of a Markov chain Pi, found by fixed-point iteration.

    Starts from `pi_seed` if given, otherwise from the uniform distribution.
    Raises ValueError if `maxit` iterations pass without the sup-norm change
    dropping below `tol`.
    """
    pi = np.ones(Pi.shape[0]) / Pi.shape[0] if pi_seed is None else pi_seed
    for _ in range(maxit):
        pi_next = pi @ Pi
        if np.max(np.abs(pi_next - pi)) < tol:
            return pi_next
        pi = pi_next
    raise ValueError(f'No convergence after {maxit} forward iterations!')
def mean(x, pi):
    """Mean of a discretized random variable with support x and probability mass function pi."""
    weighted = pi * x
    return np.sum(weighted)
def variance(x, pi):
    """Variance of a discretized random variable with support x and probability mass function pi."""
    mu = np.sum(pi * x)
    deviations = x - mu
    return np.sum(pi * deviations ** 2)
def std(x, pi):
    """Standard deviation of a discretized random variable with support x and pmf pi."""
    return variance(x, pi) ** 0.5
def cov(x, y, pi):
    """Covariance of two discretized random variables with supports x and y and common pmf pi."""
    dx = x - mean(x, pi)
    dy = y - mean(y, pi)
    return np.sum(pi * dx * dy)
def corr(x, y, pi):
    """Correlation of two discretized random variables with supports x and y and common pmf pi."""
    denominator = std(x, pi) * std(y, pi)
    return cov(x, y, pi) / denominator
def markov_tauchen(rho, sigma, N=7, m=3, normalize=True):
    """Tauchen method discretizing AR(1) s_t = rho*s_(t-1) + eps_t.

    Parameters
    ----------
    rho       : scalar, persistence
    sigma     : scalar, unconditional sd of s_t
    N         : int, number of states in discretized Markov process
    m         : scalar, discretized s goes from approx -m*sigma to m*sigma
    normalize : bool, if True return y = exp(s) normalized so that E[y] = 1,
                otherwise return the discretized s itself

    Returns
    ----------
    y  : array (N), states (see `normalize`)
    pi : array (N), stationary distribution of discretized process
    Pi : array (N*N), Markov matrix for discretized process
    """
    # make normalized grid, start with cross-sectional sd of 1
    s = np.linspace(-m, m, N)
    ds = s[1] - s[0]
    sd_innov = np.sqrt(1 - rho ** 2)
    # standard Tauchen method to generate Pi given N and m
    Pi = np.empty((N, N))
    Pi[:, 0] = norm.cdf(s[0] - rho * s + ds / 2, scale=sd_innov)
    Pi[:, -1] = 1 - norm.cdf(s[-1] - rho * s - ds / 2, scale=sd_innov)
    for j in range(1, N - 1):
        Pi[:, j] = (norm.cdf(s[j] - rho * s + ds / 2, scale=sd_innov) -
                    norm.cdf(s[j] - rho * s - ds / 2, scale=sd_innov))
    # invariant distribution and scaling
    pi = stationary(Pi)
    s *= (sigma / np.sqrt(variance(s, pi)))
    if normalize:
        y = np.exp(s) / np.sum(pi * np.exp(s))
    else:
        y = s
    return y, pi, Pi
def markov_rouwenhorst(rho, sigma, N=7):
    """Rouwenhorst method analog to markov_tauchen.

    Returns
    ----------
    y  : array (N), states proportional to exp(s) such that E[y] = 1
    pi : array (N), stationary distribution of discretized process
    Pi : array (N*N), Markov matrix for discretized process
    """
    # Explicitly typecast N as an integer, since when the grid constructor functions
    # (e.g. the function that makes a_grid) are implemented as blocks, they interpret the integer-valued calibration
    # as a float.
    N = int(N)
    # parametrize Rouwenhorst for n=2
    p = (1 + rho) / 2
    Pi = np.array([[p, 1 - p], [1 - p, p]])
    # implement recursion to build from n=3 to n=N
    for n in range(3, N + 1):
        P1, P2, P3, P4 = (np.zeros((n, n)) for _ in range(4))
        P1[:-1, :-1] = p * Pi
        P2[:-1, 1:] = (1 - p) * Pi
        P3[1:, :-1] = (1 - p) * Pi
        P4[1:, 1:] = p * Pi
        Pi = P1 + P2 + P3 + P4
        Pi[1:-1] /= 2  # interior rows receive two contributions; rescale so rows sum to 1
    # invariant distribution and scaling
    pi = stationary(Pi)
    s = np.linspace(-1, 1, N)
    s *= (sigma / np.sqrt(variance(s, pi)))
    y = np.exp(s) / np.sum(pi * np.exp(s))
    return y, pi, Pi
| 5,371 | 32.575 | 245 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/differentiate.py | """Numerical differentiation"""
from .misc import make_tuple
def numerical_diff(func, ssinputs_dict, shock_dict, h=1E-4, y_ss_list=None):
    """Forward-difference directional derivative of `func` around steady state:
    f'(xss)*shock = (f(xss + h*shock) - f(xss)) / h for small h.

    Parameters
    ----------
    func          : function, 'f' to be differentiated
    ssinputs_dict : dict, values 'xss' around which to differentiate
    shock_dict    : dict, direction 'shock' for the derivative
                    (keys are a weak subset of keys in ssinputs_dict)
    h             : [optional] scalar, step size of the forward difference
    y_ss_list     : [optional] list, value of y = f(xss) if already available

    Returns
    ----------
    dy_list : list, output f'(xss)*shock of numerical differentiation
    """
    # baseline output, computed only if the caller didn't supply it
    if y_ss_list is None:
        y_ss_list = make_tuple(func(**ssinputs_dict))
    # perturb only the shocked inputs, leaving the rest at their ss values
    perturbed = dict(ssinputs_dict)
    for k, shock in shock_dict.items():
        perturbed[k] = ssinputs_dict[k] + h * shock
    y_shocked = make_tuple(func(**perturbed))
    # divide by h to undo the scaling of the step
    return [(y - y0) / h for y, y0 in zip(y_shocked, y_ss_list)]
def numerical_diff_symmetric(func, ssinputs_dict, shock_dict, h=1E-4):
    """Same as numerical_diff, but uses the central (symmetric) difference:
    f'(xss)*shock = (f(xss + h*shock) - f(xss - h*shock)) / (2*h)
    """
    # evaluate at a small step in each direction
    up = dict(ssinputs_dict)
    down = dict(ssinputs_dict)
    for k, shock in shock_dict.items():
        up[k] = ssinputs_dict[k] + h * shock
        down[k] = ssinputs_dict[k] - h * shock
    y_up_list = make_tuple(func(**up))
    y_down_list = make_tuple(func(**down))
    # divide by 2h to undo the scaling of the two-sided step
    return [(y_up - y_down) / (2 * h) for y_up, y_down in zip(y_up_list, y_down_list)]
| 2,191 | 37.45614 | 115 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/optimized_routines.py | """Njitted routines to speed up some steps in backward iteration or aggregation"""
import numpy as np
from numba import njit
@njit
def setmin(x, xmin):
    """In-place: set each entry of 2-D array x to max(x, xmin), assuming rows are ascending.

    The early `break` relies on each row being sorted ascending: once an entry
    exceeds xmin, every later entry in that row does too.
    """
    ni, nj = x.shape
    for i in range(ni):
        for j in range(nj):
            if x[i, j] < xmin:
                x[i, j] = xmin
            else:
                break
@njit
def within_tolerance(x1, x2, tol):
    """Efficiently test max(abs(x1-x2)) <= tol for arrays of same dimensions x1, x2.

    Short-circuits on the first violation, avoiding a full pass and intermediates.
    """
    y1 = x1.ravel()
    y2 = x2.ravel()
    for i in range(y1.shape[0]):
        if np.abs(y1[i] - y2[i]) > tol:
            return False
    return True
@njit
def fast_aggregate(X, Y):
    """If X has dims (T, ...) and Y has dims (T, ...), do dot product for each T to get length-T vector.

    Identical to np.sum(X*Y, axis=(1,...,X.ndim-1)) but avoids costly creation of
    intermediates, useful for speeding up aggregation in td by factor of 4 to 5.
    """
    T = X.shape[0]
    Xnew = X.reshape(T, -1)
    Ynew = Y.reshape(T, -1)
    Z = np.empty(T)
    for t in range(T):
        Z[t] = Xnew[t, :] @ Ynew[t, :]
    return Z
| 1,188 | 26.022727 | 104 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/bijection.py | from .ordered_set import OrderedSet
class Bijection:
    """Invertible mapping between names, used for remapping block inputs/outputs.

    Any key not explicitly in `map` is implicitly mapped to itself. The `@`
    operator applies the bijection to strings, dicts (remapping keys), lists,
    sets, tuples, and OrderedSets, and composes two Bijections.
    """
    def __init__(self, map):
        # identity always implicit, remove if there explicitly
        self.map = {k: v for k, v in map.items() if k != v}
        invmap = {}
        for k, v in map.items():
            if v in invmap:
                raise ValueError(f'Duplicate value {v}, for keys {invmap[v]} and {k}')
            invmap[v] = k
        self.invmap = invmap
    @property
    def inv(self):
        # inverse bijection: swap map and invmap without revalidating
        invmap = Bijection.__new__(Bijection)  # better way to do this?
        invmap.map = self.invmap
        invmap.invmap = self.map
        return invmap
    def __repr__(self):
        return f'Bijection({repr(self.map)})'
    def __getitem__(self, k):
        # identity for anything not explicitly mapped
        return self.map.get(k, k)
    def __matmul__(self, x):
        if x is None:
            return None
        elif isinstance(x, str) or isinstance(x, int):
            return self[x]
        elif isinstance(x, Bijection):
            # compose self: v -> u with x: w -> v
            # assume everything missing in either is the identity
            M = {}
            for v, u in self.map.items():
                w = x.invmap.get(v, v)
                M[w] = u
            for w, v in x.map.items():
                if v not in self.map:
                    M[w] = v
            return Bijection(M)
        elif isinstance(x, dict):
            d = {}
            for k, v in x.items():
                if k in self.map:
                    d[self.map[k]] = v
                elif k not in d:
                    # don't overwrite if we already mapped to this
                    # effectively this prioritizes the remapped names over others
                    d[k] = v
            return d
        elif isinstance(x, list):
            return [self[k] for k in x]
        elif isinstance(x, set):
            return {self[k] for k in x}
        elif isinstance(x, tuple):
            return tuple(self[k] for k in x)
        elif isinstance(x, OrderedSet):
            return OrderedSet([self[k] for k in x])
        else:
            return NotImplemented
    def __rmatmul__(self, x):
        # right-application mirrors __matmul__ for containers of names
        if isinstance(x, str):
            return self[x]
        elif isinstance(x, dict):
            d = {}
            for k, v in x.items():
                if k in self.map:
                    d[self.map[k]] = v
                elif k not in d:
                    # don't overwrite if we already mapped to this
                    # effectively this prioritizes the remapped names over others
                    d[k] = v
            return d
        elif isinstance(x, list):
            return [self[k] for k in x]
        elif isinstance(x, set):
            return {self[k] for k in x}
        elif isinstance(x, tuple):
            return tuple(self[k] for k in x)
        else:
            return NotImplemented
    def __bool__(self):
        # falsy when the bijection is pure identity
        return bool(self.map)
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/ordered_set.py | from typing import Iterable
class OrderedSet:
    """Insertion-ordered set, backed by a dict (whose keys preserve order) mapping members to None.

    Binary operations between ordered sets (e.g. union) list all members of the
    first operand before any new members of the second; a member present in
    both keeps its earliest position.
    See test_misc_support.test_ordered_set() for examples."""
    def __init__(self, members: Iterable = []):
        self.d = dict.fromkeys(members)
    def dict_from(self, s):
        """Dict with own members as keys, zipped against the values in s."""
        return dict(zip(self, s))
    def __iter__(self):
        return iter(self.d)
    def __reversed__(self):
        members = list(self)
        members.reverse()
        return OrderedSet(members)
    def __repr__(self):
        return f"OrderedSet({list(self)})"
    def __str__(self):
        return str(list(self.d))
    def __contains__(self, k):
        return k in self.d
    def __len__(self):
        return len(self.d)
    def __getitem__(self, i):
        return list(self.d)[i]
    def add(self, x):
        self.d[x] = None
    def difference(self, s):
        return OrderedSet(k for k in self if k not in s)
    def difference_update(self, s):
        self.d = self.difference(s).d
        return self
    def discard(self, k):
        self.d.pop(k, None)
    def intersection(self, s):
        return OrderedSet(k for k in self if k in s)
    def intersection_update(self, s):
        self.d = self.intersection(s).d
        return self
    def isdisjoint(self, s):
        return not self.intersection(s)
    def issubset(self, s):
        return not self.difference(s)
    def issuperset(self, s):
        return len(self.intersection(s)) == len(s)
    def remove(self, k):
        del self.d[k]  # KeyError if absent, matching set.remove semantics
    def symmetric_difference(self, s):
        out = self.difference(s)
        for k in s:
            if k not in self:
                out.add(k)
        return out
    def symmetric_difference_update(self, s):
        self.d = self.symmetric_difference(s).d
        return self
    def union(self, s):
        return self.copy().update(s)
    def update(self, s):
        for k in s:
            self.d[k] = None
        return self
    def copy(self):
        return OrderedSet(self)
    def __eq__(self, s):
        # order-sensitive equality, and only against other OrderedSets
        return isinstance(s, OrderedSet) and list(self) == list(s)
    def __le__(self, s):
        return self.issubset(s)
    def __lt__(self, s):
        return len(self) != len(s) and self.issubset(s)
    def __ge__(self, s):
        return self.issuperset(s)
    def __gt__(self, s):
        return len(self) != len(s) and self.issuperset(s)
    def __or__(self, s):
        return self.union(s)
    def __ior__(self, s):
        return self.update(s)
    def __ror__(self, s):
        # note: even as the right operand, our members come first
        return self.union(s)
    def __and__(self, s):
        return self.intersection(s)
    def __iand__(self, s):
        return self.intersection_update(s)
    def __rand__(self, s):
        return self.intersection(s)
    def __sub__(self, s):
        return self.difference(s)
    def __isub__(self, s):
        return self.difference_update(s)
    def __rsub__(self, s):
        return OrderedSet(s).difference(self)
    def __xor__(self, s):
        return self.symmetric_difference(s)
    def __ixor__(self, s):
        return self.symmetric_difference_update(s)
    def __rxor__(self, s):
        return OrderedSet(s).symmetric_difference(self)
    """Compatibility methods, regular use not advised"""
    def pop(self):
        last = self.top()
        del self.d[last]
        return last
    def top(self):
        return list(self.d)[-1]
    def index(self, k):
        return list(self.d).index(k)
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/misc.py | """Assorted other utilities"""
import numpy as np
import scipy.linalg
from numba import njit, guvectorize
def make_tuple(x):
    """Wrap a non-sequence value in a one-element tuple; pass tuples/lists through unchanged.

    Lets users write, e.g.:
        "return r" rather than "return (r,)"
        "policy='a'" rather than "policy=('a',)"
    """
    if isinstance(x, (tuple, list)):
        return x
    return (x,)
def numeric_primitive(instance):
    """Reduce `instance` to a plain numeric value/array, validating numeric dtype.

    - int/float: returned unchanged
    - numeric np.ndarray: returned as a (new) array; non-numeric dtype raises ValueError
    - numeric tuple/list: rebuilt as the same container type; non-numeric raises ValueError
    - anything else: real part if scalar, otherwise `instance.base`
      (NOTE(review): assumes non-scalar fallthrough objects expose a `.base`
      attribute, e.g. wrapper types elsewhere in this package -- confirm callers)
    """
    # If it is already a primitive, just return it
    if type(instance) in {int, float}:
        return instance
    elif isinstance(instance, np.ndarray):
        if np.issubdtype(instance.dtype, np.number):
            return np.array(instance)
        else:
            raise ValueError(f"The tuple/list argument provided to numeric_primitive has dtype: {instance.dtype},"
                             f" which is not a valid numeric type.")
    elif type(instance) in {tuple, list}:
        instance_array = np.asarray(instance)
        if np.issubdtype(instance_array.dtype, np.number):
            return type(instance)(instance_array)
        else:
            raise ValueError(f"The tuple/list argument provided to numeric_primitive has dtype: {instance_array.dtype},"
                             f" which is not a valid numeric type.")
    else:
        return instance.real if np.isscalar(instance) else instance.base
def demean(x):
    """Subtract the mean (sum divided by size) from every element of array x."""
    avg = x.sum() / x.size
    return x - avg
# simpler aliases for LU factorization and solution
def factor(X):
    """LU-factorize matrix X; thin alias for scipy.linalg.lu_factor (pair with factored_solve)."""
    return scipy.linalg.lu_factor(X)
def factored_solve(Z, y):
    """Solve Z x = y where Z is the LU factorization returned by factor(); alias for scipy.linalg.lu_solve."""
    return scipy.linalg.lu_solve(Z, y)
# The below functions are used in steady_state
def unprime(s):
    """Given a variable's name as a `str`, strip a trailing "_p" ("prime") marker if present;
    otherwise return the name unchanged."""
    return s[:-2] if s.endswith("_p") else s
def uncapitalize(s):
    """Return `s` with its first character lower-cased (rest unchanged).

    FIX: previously raised IndexError on the empty string; now returns it as-is.
    """
    if not s:
        return s
    return s[0].lower() + s[1:]
def list_diff(l1, l2):
    """Returns the list that is the "set difference" between l1 and l2 (based on element values)"""
    return list(set(l1) - set(l2))
def dict_diff(d1, d2):
    """Returns the dictionary that is the "set difference" between d1 and d2 (based on keys, not key-value pairs)
    E.g. d1 = {"a": 1, "b": 2}, d2 = {"b": 5}, then dict_diff(d1, d2) = {"a": 1}
    """
    # dict key views support set difference directly
    return {k: d1[k] for k in d1.keys() - d2.keys()}
def smart_set(data):
    """Build a set from `data`, treating a string as a single member rather than
    iterating over its characters."""
    return {data} if isinstance(data, str) else set(data)
def smart_zip(keys, values):
    """zip() that also accepts a scalar float for `values`, pairing it with the keys
    as a single entry."""
    if isinstance(values, float):
        values = [values]
    return zip(keys, values)
def smart_zeros(n):
    """Return np.zeros(n) when n > 1, else the scalar float 0."""
    return np.zeros(n) if n > 1 else 0.
'''Tools for taste shocks used in discrete choice problems'''
def logit(V, scale):
    """Logit choice probability of choosing along 0th axis"""
    # subtract the max along axis 0 before exponentiating, for numerical stability
    expV = np.exp((V - V.max(axis=0)) / scale)
    return expV / expV.sum(axis=0)
def logsum(V, scale):
    """Logsum formula along 0th axis"""
    # max-shift for numerical stability, then add the constant back
    m = V.max(axis=0)
    return m + scale * np.log(np.exp((V - m) / scale).sum(axis=0))
def logit_choice(V, scale):
    """Logit choice probabilities and logsum along 0th axis"""
    # single max-shifted exponentiation shared by both outputs
    m = V.max(axis=0)
    expV = np.exp((V - m) / scale)
    total = expV.sum(axis=0)
    P = expV / total
    EV = m + scale * np.log(total)
    return P, EV
@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True)
def nonconcave(Va, ilower, iupper):
    """
    Let V(..., a) be the value function associated with a non-convex dynamic program. `Va` is its derivative with respect to the **single** continuous state variable `a`.

    Find ilower and iupper such that {a_{ilower + 1}, ..., a_{iupper - 1}} is the region where V is non-concave.

    Reference: Fella (2014): A generalized endogenous grid method for non-smooth and non-concave problems
    """
    nA = Va.shape[-1]
    vmin = np.inf
    vmax = -np.inf
    # Find vmin & vmax: smallest/largest marginal values involved in any locally
    # increasing segment of Va (Va rising flags non-concavity of V)
    for ia in range(nA - 1):
        if Va[ia + 1] > Va[ia]:
            vmin_temp = Va[ia]
            vmax_temp = Va[ia + 1]
            if vmin_temp < vmin:
                vmin = vmin_temp
            if vmax_temp > vmax:
                vmax = vmax_temp
    # Find ilower: last index whose marginal value still exceeds vmax
    if vmax == -np.inf:
        # no increasing segment anywhere: region is empty
        ilower_ = nA
    else:
        ia = nA
        # NOTE(review): loop starts at ia == nA, so the first comparison reads Va[nA],
        # one past the end; numba nopython mode does not bounds-check, so this is an
        # out-of-bounds read -- confirm whether ia = nA - 1 was intended.
        while ia > 0:
            if Va[ia] > vmax:
                break
            ia -= 1
        ilower_ = ia
    # Find iupper: first index whose marginal value falls below vmin
    if vmin == np.inf:
        iupper_ = 0
    else:
        ia = 0
        while ia < nA:
            if Va[ia] < vmin:
                break
            ia += 1
        iupper_ = ia
    ilower[:] = ilower_
    iupper[:] = iupper_
| 5,204 | 27.135135 | 170 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/graph.py | """Topological sort and related code"""
from .ordered_set import OrderedSet
from .bijection import Bijection
class DAG:
    """Represents "blocks" that each have inputs and outputs, where output-input relationships between
    blocks form a DAG. Fundamental DAG object intended to underlie CombinedBlock and CombinedExtendedFunction.
    Initialized with list of blocks, which are then topologically sorted"""
    def __init__(self, blocks):
        # name -> block-number maps and forward/reverse adjacency lists (original numbering)
        inmap = get_input_map(blocks)
        outmap = get_output_map(blocks)
        adj = get_block_adjacency_list(blocks, inmap)
        revadj = get_block_reverse_adjacency_list(blocks, outmap)
        topsort = topological_sort(adj, revadj, names=[getattr(block, 'name', '[NO BLOCK NAME]') for block in blocks])
        # M relabels original block numbers to topological positions
        # (Bijection is a project helper; `M @ x` presumably applies the relabeling -- see bijection.py)
        M = Bijection({t: i for i, t in enumerate(topsort)})
        self.blocks = [blocks[t] for t in topsort]
        self.inmap = {k: M @ v for k, v in inmap.items()}
        self.outmap = {k: M @ v for k, v in outmap.items()}
        self.adj = [M @ adj[t] for t in topsort]
        self.revadj = [M @ revadj[t] for t in topsort]
        # DAG-level inputs: names consumed but never produced; outputs: all produced names
        self.inputs = OrderedSet(k for k in inmap if k not in outmap)
        self.outputs = OrderedSet(outmap)
    def visit_from_inputs(self, inputs):
        """Which block numbers are ultimately dependencies of 'inputs'?"""
        inputs = inputs & self.inputs
        visited = OrderedSet()
        # forward pass in topological order
        for n, (block, parentset) in enumerate(zip(self.blocks, self.revadj)):
            # first see if block has its input directly changed
            for i in inputs:
                if i in block.inputs:
                    visited.add(n)
                    break
            else:
                # otherwise affected iff any parent block is affected
                if not parentset.isdisjoint(visited):
                    visited.add(n)
        return visited
    def visit_from_outputs(self, outputs):
        """Which block numbers are 'outputs' ultimately dependent on?"""
        outputs = outputs & self.outputs
        visited = OrderedSet()
        # backward pass in reverse topological order
        for n in reversed(range(len(self.blocks))):
            block = self.blocks[n]
            childset = self.adj[n]
            # first see if block has its output directly used
            for o in outputs:
                if o in block.outputs:
                    visited.add(n)
                    break
            else:
                # otherwise needed iff any child block is needed
                if not childset.isdisjoint(visited):
                    visited.add(n)
        return reversed(visited)
def topological_sort(adj, revadj, names=None):
    """Given directed graph pointing from each node to the nodes it depends on, topologically sort nodes.

    `adj` maps each node to its dependents, `revadj` to its dependencies; `names` is an
    optional list of human-readable labels used only in the cycle error message.
    Raises if the graph contains a cycle.
    """
    # get complete set version of dep, and its reversal, and build initial stack of nodes with no dependencies
    # (copy revadj since Kahn's algorithm destructively removes edges from it)
    revdep = adj
    dep = [s.copy() for s in revadj]
    nodeps = [n for n, depset in enumerate(dep) if not depset]
    topsorted = []
    # Kahn's algorithm: find something with no dependency, delete its edges and update
    while nodeps:
        n = nodeps.pop()
        topsorted.append(n)
        for n2 in revdep[n]:
            dep[n2].remove(n)
            if not dep[n2]:
                nodeps.append(n2)
    # should be done: topsorted should be topologically sorted with same # of elements as original graphs!
    if len(topsorted) != len(dep):
        # leftover (unsorted) nodes must contain a cycle; locate one for the error message
        cycle_ints = find_cycle(dep, set(range(len(dep))) - set(topsorted))
        assert cycle_ints is not None, 'topological sort failed but no cycle, THIS SHOULD NEVER EVER HAPPEN'
        cycle = [names[i] for i in cycle_ints] if names else cycle_ints
        raise Exception(f'Topological sort failed: cyclic dependency {" -> ".join([str(n) for n in cycle])}')
    return topsorted
def get_input_map(blocks: list):
    """inmap[i] gives set of block numbers where i is an input"""
    inmap = {}
    for num, block in enumerate(blocks):
        for i in block.inputs:
            # setdefault creates the OrderedSet on first sight of this input name
            inmap.setdefault(i, OrderedSet()).add(num)
    return inmap
def get_output_map(blocks: list):
    """outmap[o] gives unique block number where o is an output"""
    result = {}
    for idx, blk in enumerate(blocks):
        for o in blk.outputs:
            # each output name may be produced by exactly one block
            if o in result:
                raise ValueError(f'{o} is output twice')
            result[o] = idx
    return result
def get_block_adjacency_list(blocks, inmap):
    """adj[n] for block number n gives set of block numbers which this block points to"""
    adj = []
    for block in blocks:
        children = OrderedSet()
        for o in block.outputs:
            # every consumer of output o becomes a child of this block
            if o in inmap:
                children |= inmap[o]
        adj.append(children)
    return adj
def get_block_reverse_adjacency_list(blocks, outmap):
    """revadj[n] for block number n gives set of block numbers that point to this block"""
    revadj = []
    for block in blocks:
        parents = OrderedSet()
        for i in block.inputs:
            # the unique producer of input i (if any) is a parent of this block
            if i in outmap:
                parents.add(outmap[i])
        revadj.append(parents)
    return revadj
def find_intermediate_inputs(blocks):
    # TODO: should be deprecated
    """Find outputs of the blocks in blocks that are inputs to other blocks in blocks.
    This is useful to ensure that all of the relevant curlyJ Jacobians (of all inputs to all outputs) are computed.
    """
    required = OrderedSet()
    outmap = get_output_map(blocks)
    for num, block in enumerate(blocks):
        if hasattr(block, 'inputs'):
            inputs = block.inputs
        else:
            # fallback for dict-like "blocks" mapping each output to its inputs
            # (NOTE(review): presumably nested Jacobian-style dicts -- confirm callers)
            inputs = OrderedSet(i for o in block for i in block[o])
        for i in inputs:
            if i in outmap:
                required.add(i)
    return required
def find_cycle(dep, onlyset):
    """Return list giving cycle if there is one, otherwise None.

    `dep` is an adjacency list (node -> set of dependencies); `onlyset` restricts the
    search to that subset of nodes. Iterative DFS with an explicit stack: the stack is
    the current path, so encountering a node already on it means we found a cycle.
    """
    # supposed to look only within 'onlyset', so filter out everything else
    # awkward holdover: 'dep' is transformed here into a dict with integer keys
    dep = {k: (dep[k] & onlyset) for k in range(len(dep)) if k in onlyset}
    tovisit = set(dep.keys())
    stack = OrderedSet()
    while tovisit or stack:
        if stack:
            # if stack has something, still need to proceed with DFS
            n = stack.top()
            if dep[n]:
                # if there are any dependencies left, let's look at them
                n2 = dep[n].pop()
                if n2 in stack:
                    # we have a cycle, since this is already in our stack
                    i2loc = stack.index(n2)
                    return stack[i2loc:] + [stack[i2loc]]
                else:
                    # no cycle, visit this node only if we haven't already visited it
                    if n2 in tovisit:
                        tovisit.remove(n2)
                        stack.add(n2)
            else:
                # node exhausted: remove it from the path.
                # BUG FIX: was `stack.pop(n)`, but OrderedSet.pop() takes no argument
                # and raised TypeError; n is by construction the top of the stack,
                # so plain pop() removes exactly it.
                stack.pop()
        else:
            # nothing left on stack, let's start the DFS from something new
            n = tovisit.pop()
            stack.add(n)
    # if we never find a cycle, we're done
    return None
| 7,229 | 36.46114 | 118 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/multidim.py | import numpy as np
def multiply_ith_dimension(Pi, i, X):
    """If Pi is a matrix, multiply Pi times the ith dimension of X and return"""
    # bring dimension i to the front, collapse the rest, multiply, then undo
    Xs = X.swapaxes(0, i)
    lead_shape = Xs.shape
    flat = Pi @ Xs.reshape((lead_shape[0], -1))
    return flat.reshape((Pi.shape[0], *lead_shape[1:])).swapaxes(0, i)
def outer(pis):
    """Return n-dimensional outer product of list of n vectors"""
    # fold kron over the vectors, then reshape the flat result into an n-d array
    prod = pis[0]
    for v in pis[1:]:
        prod = np.kron(prod, v)
    return prod.reshape(tuple(len(v) for v in pis))
def batch_multiply_ith_dimension(P, i, X):
    """If P is (D, X.shape) array, multiply P and X along ith dimension of X."""
    # standardize arrays: move the target dimension to the front of X (and slot 1 of P)
    P_ = P.swapaxes(1, 1 + i)
    X_ = X.swapaxes(0, i)
    pshape = P_.shape
    flatP = P_.reshape((*pshape[:2], -1))
    flatX = X_.reshape((X_.shape[0], -1))
    # batched matrix-vector product: P[i, j, ...] @ X[j, ...]
    out = np.einsum('ijb,jb->ib', flatP, flatX)
    # restore original shape and axis order
    return out.reshape(pshape[0], *pshape[2:]).swapaxes(0, i)
| 1,039 | 24.365854 | 80 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/function.py | import re
import inspect
import numpy as np
from .ordered_set import OrderedSet
from . import graph
# TODO: fix this, have it twice (main version in misc) due to circular import problem
# let's make everything point to here for input_list, etc. so that this is unnecessary
def make_tuple(x):
    """Wrap a non-sequence value in a one-element tuple; pass tuples/lists through unchanged.

    Lets user write, e.g.:
        "return r" rather than "return (r,)"
        "policy='a'" rather than "policy=('a',)"
    """
    if isinstance(x, (tuple, list)):
        return x
    return (x,)
def input_list(f):
    """Return list of function inputs (both positional and keyword arguments)"""
    # OrderedSet preserves the declaration order of the parameters
    return OrderedSet(inspect.signature(f).parameters)
def input_defaults(f):
    """Return dict mapping f's parameter names to their default values; parameters
    without defaults are omitted.

    FIX: compare against the inspect.Parameter.empty sentinel by identity. The old
    `p.default != p.empty` invoked the default's __eq__, which fails for defaults
    with elementwise equality (e.g. numpy arrays, common in this package).
    """
    defaults = {}
    for p in inspect.signature(f).parameters.values():
        if p.default is not p.empty:
            defaults[p.name] = p.default
    return defaults
def output_list(f):
    """Scans source code of function to detect statement like

    'return L, Div'

    and reports the list ['L', 'Div'].

    Important to write functions in this way when they will be scanned by output_list, for
    either SimpleBlock or HetBlock.
    """
    # takes the *last* 'return ...' line in the source; relies on outputs being a bare
    # comma-separated tuple on a single line (no multi-line or parenthesized returns)
    return OrderedSet(re.findall('return (.*?)\n', inspect.getsource(f))[-1].replace(' ', '').split(','))
def metadata(f):
    """Return the (name, inputs, outputs) triple describing function f."""
    name = f.__name__
    inputs = input_list(f)
    outputs = output_list(f)
    return name, inputs, outputs
class ExtendedFunction:
    """Wrapped function that knows its inputs and outputs. Evaluates on dict containing necessary
    inputs, returns dict containing outputs by name"""
    def __init__(self, f):
        if isinstance(f, ExtendedFunction):
            # copy-construct: share the underlying function and its metadata
            self.f, self.name, self.inputs, self.outputs = f.f, f.name, f.inputs, f.outputs
        else:
            self.f = f
            self.name, self.inputs, self.outputs = metadata(f)
    def __call__(self, input_dict):
        # take subdict of d contained in inputs
        # this allows for d not to include all inputs (if there are optional inputs)
        input_dict = {k: v for k, v in input_dict.items() if k in self.inputs}
        return self.outputs.dict_from(make_tuple(self.f(**input_dict)))
    def __repr__(self):
        return f'<{type(self).__name__}({self.name}): {self.inputs} -> {self.outputs}>'
    def wrapped_call(self, input_dict, preprocess=None, postprocess=None):
        """Like __call__, but optionally maps `preprocess` over the inputs and
        `postprocess` over the outputs."""
        if preprocess is not None:
            input_dict = {k: preprocess(v) for k, v in input_dict.items() if k in self.inputs}
        else:
            input_dict = {k: v for k, v in input_dict.items() if k in self.inputs}
        output_dict = self.outputs.dict_from(make_tuple(self.f(**input_dict)))
        if postprocess is not None:
            output_dict = {k: postprocess(v) for k, v in output_dict.items()}
        return output_dict
    def differentiable(self, input_dict, h=1E-4, twosided=False):
        """Return a DifferentiableExtendedFunction centered at `input_dict` with step h."""
        return DifferentiableExtendedFunction(self.f, self.name, self.inputs, self.outputs, input_dict, h, twosided)
class DifferentiableExtendedFunction(ExtendedFunction):
    """ExtendedFunction fixed at a base point `input_dict`, supporting finite-difference
    directional derivatives via diff() (one-sided diff1 or two-sided diff2)."""
    def __init__(self, f, name, inputs, outputs, input_dict, h=1E-4, twosided=False):
        self.f, self.name, self.inputs, self.outputs = f, name, inputs, outputs
        self.input_dict = input_dict
        self.output_dict = None # lazy evaluation of outputs for one-sided diff
        self.h = h
        self.default_twosided = twosided
    def diff(self, shock_dict, h=None, hide_zeros=False, twosided=None):
        """Directional derivative in direction `shock_dict`; `twosided=None` defers
        to the default chosen at construction."""
        if twosided is None:
            twosided = self.default_twosided
        if not twosided:
            return self.diff1(shock_dict, h, hide_zeros)
        else:
            return self.diff2(shock_dict, h, hide_zeros)
    def diff1(self, shock_dict, h=None, hide_zeros=False):
        """One-sided (forward) difference; caches base-point outputs on first use."""
        if h is None:
            h = self.h
        if self.output_dict is None:
            self.output_dict = self(self.input_dict)
        # shift only the inputs that appear in shock_dict, leave the rest at the base point
        shocked_input_dict = {**self.input_dict,
                              **{k: self.input_dict[k] + h * shock for k, shock in shock_dict.items() if k in self.input_dict}}
        shocked_output_dict = self(shocked_input_dict)
        derivative_dict = {k: (shocked_output_dict[k] - self.output_dict[k])/h for k in self.output_dict}
        if hide_zeros:
            derivative_dict = hide_zero_values(derivative_dict)
        return derivative_dict
    def diff2(self, shock_dict, h=None, hide_zeros=False):
        """Two-sided (central) difference: more accurate, twice the function evaluations."""
        if h is None:
            h = self.h
        shocked_input_dict_up = {**self.input_dict,
                              **{k: self.input_dict[k] + h * shock for k, shock in shock_dict.items() if k in self.input_dict}}
        shocked_input_dict_dn = {**self.input_dict,
                              **{k: self.input_dict[k] - h * shock for k, shock in shock_dict.items() if k in self.input_dict}}
        shocked_output_dict_up = self(shocked_input_dict_up)
        shocked_output_dict_dn = self(shocked_input_dict_dn)
        derivative_dict = {k: (shocked_output_dict_up[k] - shocked_output_dict_dn[k])/(2*h) for k in shocked_output_dict_dn}
        if hide_zeros:
            derivative_dict = hide_zero_values(derivative_dict)
        return derivative_dict
def hide_zero_values(d):
    """Drop dict entries whose value is numerically zero everywhere."""
    out = {}
    for key, val in d.items():
        if not np.allclose(val, 0):
            out[key] = val
    return out
class CombinedExtendedFunction(ExtendedFunction):
    """A DAG of ExtendedFunctions behaving as a single ExtendedFunction: evaluates its
    members in topological order, threading earlier outputs into later inputs."""
    def __init__(self, fs, name=None):
        self.dag = graph.DAG([ExtendedFunction(f) for f in fs])
        self.inputs = self.dag.inputs
        self.outputs = self.dag.outputs
        self.functions = {b.name: b for b in self.dag.blocks}
        if name is None:
            # default name: the single member's name, or 'first_last' of the sorted members
            names = list(self.functions)
            if len(names) == 1:
                self.name = names[0]
            else:
                self.name = f'{names[0]}_{names[-1]}'
        else:
            self.name = name
    def __call__(self, input_dict, outputs=None):
        # if specific outputs requested, only evaluate the functions they depend on
        functions_to_visit = list(self.functions.values())
        if outputs is not None:
            functions_to_visit = [functions_to_visit[i] for i in self.dag.visit_from_outputs(outputs)]
        results = input_dict.copy()
        for f in functions_to_visit:
            results.update(f(results))
        if outputs is not None:
            return {k: results[k] for k in outputs}
        else:
            return results
    def call_on_deviations(self, ss, dev_dict, outputs=None):
        """Evaluate only the functions affected by deviations `dev_dict` from baseline `ss`,
        returning just the recomputed values."""
        functions_to_visit = self.filter(list(self.functions.values()), dev_dict, outputs)
        results = {}
        input_dict = {**ss, **dev_dict}
        for f in functions_to_visit:
            out = f(input_dict)
            results.update(out)
            input_dict.update(out)
        if outputs is not None:
            return {k: v for k, v in results.items() if k in outputs}
        else:
            return results
    def filter(self, function_list, inputs, outputs=None):
        """Subset of `function_list` downstream of `inputs` and (optionally) upstream of `outputs`."""
        nums_to_visit = self.dag.visit_from_inputs(inputs)
        if outputs is not None:
            nums_to_visit &= self.dag.visit_from_outputs(outputs)
        return [function_list[n] for n in nums_to_visit]
    def wrapped_call(self, input_dict, preprocess=None, postprocess=None):
        # pre/postprocessing not meaningful at the combined level
        raise NotImplementedError
    def add(self, f):
        """Return a new CombinedExtendedFunction with function(s) f added."""
        if inspect.isfunction(f) or isinstance(f, ExtendedFunction):
            return CombinedExtendedFunction(list(self.functions.values()) + [f])
        else:
            # otherwise assume f is iterable
            return CombinedExtendedFunction(list(self.functions.values()) + list(f))
    def remove(self, name):
        """Return a new CombinedExtendedFunction with the named function(s) removed."""
        if isinstance(name, str):
            return CombinedExtendedFunction([v for k, v in self.functions.items() if k != name])
        else:
            # otherwise assume name is iterable
            return CombinedExtendedFunction([v for k, v in self.functions.items() if k not in name])
    def children(self):
        """Names of member functions, in topological order."""
        return OrderedSet(self.functions)
    def differentiable(self, input_dict, h=1E-5, twosided=False):
        # NOTE(review): default step 1E-5 differs from ExtendedFunction.differentiable's 1E-4 --
        # confirm the discrepancy is intentional
        return DifferentiableCombinedExtendedFunction(self.functions, self.dag, self.name, self.inputs, self.outputs, input_dict, h, twosided)
class DifferentiableCombinedExtendedFunction(CombinedExtendedFunction, DifferentiableExtendedFunction):
    """CombinedExtendedFunction fixed at a base point, supporting finite-difference
    directional derivatives propagated function-by-function along the DAG."""
    def __init__(self, functions, dag, name, inputs, outputs, input_dict, h=1E-5, twosided=False):
        self.dag, self.name, self.inputs, self.outputs = dag, name, inputs, outputs
        # make every member differentiable around the same base point
        diff_functions = {}
        for k, f in functions.items():
            diff_functions[k] = f.differentiable(input_dict, h)
        self.diff_functions = diff_functions
        self.default_twosided = twosided
    def diff(self, shock_dict, h=None, outputs=None, hide_zeros=False, twosided=None):
        """Directional derivative in direction `shock_dict`.

        FIX: `twosided` previously defaulted to False, which made the
        `if twosided is None` branch unreachable and silently ignored the
        `default_twosided` chosen at construction; it now defaults to None,
        matching DifferentiableExtendedFunction.diff.
        """
        if twosided is None:
            twosided = self.default_twosided
        if not twosided:
            return self.diff1(shock_dict, h, outputs, hide_zeros)
        else:
            return self.diff2(shock_dict, h, outputs, hide_zeros)
    def diff1(self, shock_dict, h=None, outputs=None, hide_zeros=False):
        """One-sided differences, propagating derivative 'shocks' through the DAG."""
        functions_to_visit = self.filter(list(self.diff_functions.values()), shock_dict, outputs)
        shock_dict = shock_dict.copy()
        results = {}
        for f in functions_to_visit:
            out = f.diff1(shock_dict, h, hide_zeros)
            results.update(out)
            shock_dict.update(out)
        if outputs is not None:
            return {k: v for k, v in results.items() if k in outputs}
        else:
            return results
    def diff2(self, shock_dict, h=None, outputs=None, hide_zeros=False):
        """Two-sided differences, propagating derivative 'shocks' through the DAG."""
        functions_to_visit = self.filter(list(self.diff_functions.values()), shock_dict, outputs)
        shock_dict = shock_dict.copy()
        results = {}
        for f in functions_to_visit:
            out = f.diff2(shock_dict, h, hide_zeros)
            results.update(out)
            shock_dict.update(out)
        if outputs is not None:
            return {k: v for k, v in results.items() if k in outputs}
        else:
            return results
| 10,065 | 36.007353 | 150 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/__init__.py | """Utilities relating to: interpolation, forward step/transition, grids and Markov chains, solvers, sorting, etc."""
from . import (bijection, differentiate, discretize, drawdag, function, graph, interpolate,
misc, multidim, optimized_routines, ordered_set, solvers)
| 284 | 56 | 116 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/drawdag.py | import warnings
from sequence_jacobian.blocks.solved_block import SolvedBlock
from sequence_jacobian.blocks.het_block import HetBlock
"""
Adrien's DAG Graph routine, updated for SSJ v1.0
Requires installing graphviz package and executables
https://www.graphviz.org/
On a mac this can be done as follows:
1) Download macports at:
https://www.macports.org/install.php
2) On the command line, install graphviz with macports by typing
sudo port install graphviz
"""
try:
    # graphviz and IPython are optional dependencies; fall back to a warning stub below
    from graphviz import Digraph
    from IPython.display import display
    def drawdag(model, exogenous=[], unknowns=[], targets=[], leftright=False, save=False, savepath=None):
        '''
        Routine that draws DAG

        :param model: combined block to be represented as dag
        :param exogenous: (optional) exogenous variables, to be represented on DAG
        :param unknowns: (optional) unknown variables, to be represented on DAG
        :param targets: (optional) target variables, to be represented on DAG
        :bool leftright: if True, plots dag from left to right instead of top to bottom
        :param save: if True, also render the figure to disk as a png
        :param savepath: path for the rendered png, defaults to 'dag/<model name>'
        :return: none

        NOTE(review): the mutable default arguments ([]) are never mutated here, so they
        are harmless, but () would be the safer convention.
        '''
        # Start DAG
        dot = Digraph(comment='Model DAG')
        # Make it left-to-right if asked
        if leftright:
            dot.attr(rankdir='LR', ratio='compress', center='true')
        else:
            dot.attr(ratio='auto', center='true')
        # add initial nodes (one for exogenous, one for unknowns) provided those are not empty lists
        if exogenous:
            dot.node('exog', 'exogenous', shape='box')
        if unknowns:
            dot.node('unknowns', 'unknowns', shape='box')
        if targets:
            dot.node('targets', 'targets', shape='diamond')
        # add nodes sequentially in order, labeled with block type and number
        for i, b in enumerate(model.blocks):
            if isinstance(b, HetBlock):
                dot.node(str(i), b.name + ' [HA, ' + str(i) + ']')
            elif isinstance(b, SolvedBlock) :
                dot.node(str(i), b.name + ' [solved,' + str(i) + ']')
            else:
                dot.node(str(i), b.name + ' [' + str(i) + ']')
            # nodes from exogenous to i (figure out if needed and draw)
            if exogenous:
                edgelabel = b.inputs & set(exogenous)
                if len(edgelabel) != 0:
                    edgelabel_list = list(edgelabel)
                    edgelabel_str = ', '.join(str(e) for e in edgelabel_list)
                    dot.edge('exog', str(i), label=str(edgelabel_str))
            # nodes from unknowns to i (figure out if needed, then draw)
            if unknowns:
                edgelabel = b.inputs & set(unknowns)
                if len(edgelabel) != 0:
                    edgelabel_list = list(edgelabel)
                    edgelabel_str = ', '.join(str(e) for e in edgelabel_list)
                    dot.edge('unknowns', str(i), label=str(edgelabel_str))
            # nodes from i to final targets
            for target in targets:
                if target in b.outputs:
                    dot.edge(str(i), 'targets', label=target)
            # nodes from any interior block to i
            for j in model.revadj[i]:
                # figure out inputs of i that are also outputs of j
                edgelabel = b.inputs & model.blocks[j].outputs
                edgelabel_list = list(edgelabel)
                edgelabel_str = ', '.join(str(e) for e in edgelabel_list)
                # draw edge from j to i
                dot.edge(str(j), str(i), label=str(edgelabel_str))
        if save:
            if savepath is None:
                savepath = 'dag/' + model.name
            dot.render(savepath, format='png', cleanup=True)
        display(dot)
except ImportError:
    # stub used when graphviz/IPython are unavailable: warns instead of drawing
    def drawdag(*args, **kwargs):
        warnings.warn("\nAttempted to use `drawdag` when the package `graphviz` has not yet been installed. \n"
                      "DAG visualization tools, i.e. drawdag, will not produce any figures unless this dependency has been installed. \n"
                      "If you want to install, try typing 'conda install -c conda-forge python-graphviz' at the terminal,\n"
                      "or see README for more instructions. Once installed, re-load sequence-jacobian to produce DAG figures.")
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/solvers.py | """Simple nonlinear solvers"""
import numpy as np
import warnings
def newton_solver(f, x0, y0=None, tol=1E-9, maxcount=100, backtrack_c=0.5, verbose=True):
    """Simple line search solver for root x satisfying f(x)=0 using Newton direction.

    Backtracks if input invalid or improvement is not at least half the predicted improvement.

    NOTE(review): `x += dx` updates x in place, so if x0 is a numpy array the caller's
    x0 is mutated -- confirm callers pass a copy if they need the initial guess preserved.

    Parameters
    ----------
    f : function, to solve for f(x)=0, input and output are arrays of same length
    x0 : array (n), initial guess for x
    y0 : [optional] array (n), y0=f(x0), if already known
    tol : [optional] scalar, solver exits successfully when |f(x)| < tol
    maxcount : [optional] int, maximum number of Newton steps
    backtrack_c : [optional] scalar, fraction to backtrack if step unsuccessful, i.e.
        if we tried step from x to x+dx, now try x+backtrack_c*dx

    Returns
    ----------
    x : array (n), (approximate) root of f(x)=0
    y : array (n), y=f(x), satisfies |y| < tol
    """
    x, y = x0, y0
    if y is None:
        y = f(x)
    for count in range(maxcount):
        if verbose:
            printit(count, x, y)
        if np.max(np.abs(y)) < tol:
            return x, y
        # fresh numerical Jacobian and exact Newton step each iteration
        J = obtain_J(f, x, y)
        dx = np.linalg.solve(J, -y)
        # backtrack at most 29 times
        for bcount in range(30):
            try:
                ynew = f(x + dx)
            except ValueError:
                # function rejected the trial input outright: shrink the step
                if verbose:
                    print('backtracking\n')
                dx *= backtrack_c
            else:
                # accept only if we get at least half the linearly-predicted decrease
                # in 0.5*|y|^2 (the prediction is scaled down as we backtrack)
                predicted_improvement = -np.sum((J @ dx) * y) * ((1 - 1 / 2 ** bcount) + 1) / 2
                actual_improvement = (np.sum(y ** 2) - np.sum(ynew ** 2)) / 2
                if actual_improvement < predicted_improvement / 2:
                    if verbose:
                        print('backtracking\n')
                    dx *= backtrack_c
                else:
                    y = ynew
                    x += dx
                    break
        else:
            raise ValueError('Too many backtracks, maybe bad initial guess?')
    else:
        raise ValueError(f'No convergence after {maxcount} iterations')
def broyden_solver(f, x0, y0=None, tol=1E-9, maxcount=100, backtrack_c=0.5, verbose=True):
    """Similar to newton_solver, but solves f(x)=0 using approximate rather than exact Newton direction,
    obtaining approximate Jacobian J=f'(x) from Broyden updating (starting from exact Newton at f'(x0)).

    Backtracks only if error raised by evaluation of f, since improvement criterion no longer guaranteed
    to work for any amount of backtracking if Jacobian not exact.

    NOTE(review): like newton_solver, `x += dx` mutates x0 in place if it is an array.
    """
    x, y = x0, y0
    if y is None:
        y = f(x)
    for count in range(maxcount):
        if verbose:
            printit(count, x, y)
        if np.max(np.abs(y)) < tol:
            return x, y
        # initialize J with Newton!
        if count == 0:
            J = obtain_J(f, x, y)
        if len(x) == len(y):
            dx = np.linalg.solve(J, -y)
        elif len(x) < len(y):
            # overdetermined system: fall back to least squares
            warnings.warn(f"Dimension of x, {len(x)} is less than dimension of y, {len(y)}."
                          f" Using least-squares criterion to solve for approximate root.")
            dx = np.linalg.lstsq(J, -y, rcond=None)[0]
        else:
            raise ValueError(f"Dimension of x, {len(x)} is greater than dimension of y, {len(y)}."
                             f" Cannot solve underdetermined system.")
        # backtrack at most 29 times
        for bcount in range(30):
            # note: can't test for improvement with Broyden because maybe
            # the function doesn't improve locally in this direction, since
            # J isn't the exact Jacobian
            try:
                ynew = f(x + dx)
            except ValueError:
                if verbose:
                    print('backtracking\n')
                dx *= backtrack_c
            else:
                # rank-one Broyden update of the approximate Jacobian
                J = broyden_update(J, dx, ynew - y)
                y = ynew
                x += dx
                break
        else:
            raise ValueError('Too many backtracks, maybe bad initial guess?')
    else:
        raise ValueError(f'No convergence after {maxcount} iterations')
def obtain_J(f, x, y, h=1E-5):
    """Finds Jacobian f'(x) around y=f(x) by one-sided finite differences with step h."""
    nx, ny = x.shape[0], y.shape[0]
    J = np.empty((ny, nx))
    for i in range(nx):
        # perturb only coordinate i by h
        step = np.zeros(nx)
        step[i] = h
        J[:, i] = (f(x + step) - y) / h
    return J
def broyden_update(J, dx, dy):
    """Returns Broyden rank-one update to approximate Jacobian J, given that last change in
    inputs to function was dx and led to output change of dy."""
    residual = dy - J @ dx
    scale = np.linalg.norm(dx) ** 2
    return J + np.outer(residual / scale, dx)
def printit(it, x, y, **kwargs):
    """Convenience printing function for verbose iterations"""
    print(f'On iteration {it}')
    # 3-decimal comma-separated vectors, matching the historical %-format output
    print('x = ' + ','.join(f'{v:.3f}' for v in x))
    print('y = ' + ','.join(f'{v:.3f}' for v in y))
    for kw, val in kwargs.items():
        print(f'{kw} = {val:.3f}')
    print('\n')
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/utilities/interpolate.py | """Efficient linear interpolation exploiting monotonicity.
Interpolates increasing query points xq against increasing data points x.
- interpolate_y: (x, xq, y) -> yq
get interpolated values of yq at xq
- interpolate_coord: (x, xq) -> (xqi, xqpi)
get representation xqi, xqpi of xq interpolated against x
xq = xqpi * x[xqi] + (1-xqpi) * x[xqi+1]
- apply_coord: (xqi, xqpi, y) -> yq
use representation xqi, xqpi to get yq at xq
yq = xqpi * y[xqi] + (1-xqpi) * y[xqi+1]
Composing interpolate_coord and apply_coord gives interpolate_y.
All three functions are written for vectors but can be broadcast to other dimensions
since we use Numba's guvectorize decorator. In these cases, interpolation is always
done on the final dimension.
"""
import numpy as np
from numba import njit, guvectorize
@guvectorize(['void(float64[:], float64[:], float64[:], float64[:])'], '(n),(nq),(n)->(nq)')
def interpolate_y(x, xq, y, yq):
    """Efficient linear interpolation exploiting monotonicity.

    Complexity O(n+nq), so most efficient when x and xq have comparable number of points.
    Extrapolates linearly when xq out of domain of x.

    Parameters
    ----------
    x  : array (n), ascending data points
    xq : array (nq), ascending query points
    y  : array (n), data points

    Returns
    ----------
    yq : array (nq), interpolated points
    """
    nxq, nx = xq.shape[0], x.shape[0]
    # running pointer xi tracks the bracketing interval [x_low, x_high]; because
    # both x and xq are ascending, it only ever moves forward (single pass)
    xi = 0
    x_low = x[0]
    x_high = x[1]
    for xqi_cur in range(nxq):
        xq_cur = xq[xqi_cur]
        while xi < nx - 2:
            if x_high >= xq_cur:
                break
            xi += 1
            x_low = x_high
            x_high = x[xi + 1]
        # weight on the lower gridpoint; outside [x[0], x[-1]] this extrapolates linearly
        xqpi_cur = (x_high - xq_cur) / (x_high - x_low)
        yq[xqi_cur] = xqpi_cur * y[xi] + (1 - xqpi_cur) * y[xi + 1]
@guvectorize(['void(float64[:], float64[:], uint32[:], float64[:])'], '(n),(nq)->(nq),(nq)')
def interpolate_coord(x, xq, xqi, xqpi):
    """Get representation xqi, xqpi of xq interpolated against x:

    xq = xqpi * x[xqi] + (1-xqpi) * x[xqi+1]

    Parameters
    ----------
    x    : array (n), ascending data points
    xq   : array (nq), ascending query points

    Returns
    ----------
    xqi  : array (nq), indices of lower bracketing gridpoints
    xqpi : array (nq), weights on lower bracketing gridpoints
    """
    nxq, nx = xq.shape[0], x.shape[0]
    # same forward-only bracketing pointer as interpolate_y: O(n+nq) total
    xi = 0
    x_low = x[0]
    x_high = x[1]
    for xqi_cur in range(nxq):
        xq_cur = xq[xqi_cur]
        while xi < nx - 2:
            if x_high >= xq_cur:
                break
            xi += 1
            x_low = x_high
            x_high = x[xi + 1]
        # weight can fall outside [0, 1] for out-of-domain queries (linear extrapolation)
        xqpi[xqi_cur] = (x_high - xq_cur) / (x_high - x_low)
        xqi[xqi_cur] = xi
@guvectorize(['void(int64[:], float64[:], float64[:], float64[:])',
              'void(uint32[:], float64[:], float64[:], float64[:])'], '(nq),(nq),(n)->(nq)')
def apply_coord(x_i, x_pi, y, yq):
    """Use representation xqi, xqpi to get yq at xq:

    yq = xqpi * y[xqi] + (1-xqpi) * y[xqi+1]

    Parameters
    ----------
    xqi  : array (nq), indices of lower bracketing gridpoints
    xqpi : array (nq), weights on lower bracketing gridpoints
    y    : array (n), data points

    Returns
    ----------
    yq : array (nq), interpolated points
    """
    nq = x_i.shape[0]
    for iq in range(nq):
        # convex combination of the two bracketing data points
        y_low = y[x_i[iq]]
        y_high = y[x_i[iq]+1]
        yq[iq] = x_pi[iq]*y_low + (1-x_pi[iq])*y_high
'''Part 2: More robust linear interpolation that does not require monotonicity in query points.
Intended for general use in interpolating policy rules that we cannot be sure are monotonic.
Only get xqi, xqpi representation, for case where x is one-dimensional, in this application.
'''
def interpolate_coord_robust(x, xq, check_increasing=False):
    """Get the (xqi, xqpi) interpolation representation of query points xq against
    ascending grid x, without assuming any ordering of xq.

    Uses binary search per query point, so it is less efficient than
    interpolate_coord but safe for non-monotonic queries (e.g. policy rules).

    Parameters
    ----------
    x : array (n), ascending data points
    xq : array of any shape, query points (in any order); interpolation is on the
        final dimension if multidimensional
    check_increasing : bool, optionally verify that x is strictly increasing

    Returns
    ----------
    xqi : array (same shape as xq), indices of lower bracketing gridpoints
    xqpi : array (same shape as xq), weights on lower bracketing gridpoints
    """
    if x.ndim != 1:
        raise ValueError('Data input to interpolate_coord_robust must have exactly one dimension')
    if check_increasing and np.any(x[:-1] >= x[1:]):
        raise ValueError('Data input to interpolate_coord_robust must be strictly increasing')
    # vector case handled directly; otherwise flatten, delegate, and restore shape
    if xq.ndim == 1:
        return interpolate_coord_robust_vector(x, xq)
    flat_i, flat_pi = interpolate_coord_robust_vector(x, xq.ravel())
    return flat_i.reshape(xq.shape), flat_pi.reshape(xq.shape)
@njit
def interpolate_coord_robust_vector(x, xq):
    """Does interpolate_coord_robust where xq must be a vector, more general function is wrapper"""
    n = len(x)
    nq = len(xq)
    xqi = np.empty(nq, dtype=np.uint32)
    xqpi = np.empty(nq)
    for iq in range(nq):
        if xq[iq] < x[0]:
            # below the grid: use the first interval (weight > 1 extrapolates linearly)
            ilow = 0
        elif xq[iq] > x[-2]:
            # in or beyond the last interval (weight < 0 extrapolates linearly above x[-1])
            ilow = n-2
        else:
            # start binary search
            # should end with ilow and ihigh exactly 1 apart, bracketing variable
            ihigh = n-1
            ilow = 0
            while ihigh - ilow > 1:
                imid = (ihigh + ilow) // 2
                if xq[iq] > x[imid]:
                    ilow = imid
                else:
                    ihigh = imid
        xqi[iq] = ilow
        xqpi[iq] = (x[ilow+1] - xq[iq]) / (x[ilow+1] - x[ilow])
    return xqi, xqpi
'''Used in discrete choice problems'''
@njit
def interpolate_coord_njit(x, xq):
    """njit-callable version of interpolate_coord: (xqi, xqpi) for ascending x and xq."""
    nxq, nx = xq.shape[0], x.shape[0]
    xqi = np.empty(nxq, dtype=np.uint32)
    xqpi = np.empty(nxq)
    # single forward pass through both ascending arrays
    xi = 0
    x_low = x[0]
    x_high = x[1]
    for xqi_cur in range(nxq):
        xq_cur = xq[xqi_cur]
        while xi < nx - 2:
            if x_high >= xq_cur:
                break
            xi += 1
            x_low = x_high
            x_high = x[xi + 1]
        # weight on lower gridpoint; outside [0, 1] means linear extrapolation
        xqpi[xqi_cur] = (x_high - xq_cur) / (x_high - x_low)
        xqi[xqi_cur] = xi
    return xqi, xqpi
@njit
def apply_coord_njit(x_i, x_pi, y):
    """njit-callable version of apply_coord: evaluate y at the (x_i, x_pi) representation."""
    nq = x_i.shape[0]
    yq = np.empty(nq)
    for iq in range(nq):
        y_low = y[x_i[iq]]
        y_high = y[x_i[iq]+1]
        yq[iq] = x_pi[iq]*y_low + (1-x_pi[iq])*y_high
    return yq
@njit
def interpolate_point(x, x0, x1, y0, y1):
    """Evaluate at x the line through (x0, y0) and (x1, y1); extrapolates outside [x0, x1]."""
    y = y0 + (x - x0) * (y1 - y0) / (x1 - x0)
    return y
| 6,714 | 28.069264 | 99 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/combined_block.py | """CombinedBlock class and the combine function to generate it"""
from .block import Block
from .auxiliary_blocks.jacobiandict_block import JacobianDictBlock
from .support.parent import Parent
from ..classes import ImpulseDict, JacobianDict
from ..utilities.graph import DAG, find_intermediate_inputs
def combine(blocks, name="", model_alias=False):
    """Wrap a list of blocks into a single, topologically sorted CombinedBlock."""
    combined = CombinedBlock(blocks, name=name, model_alias=model_alias)
    return combined
# Useful functional alias
def create_model(blocks, **kwargs):
    """Like combine(), but the resulting block represents itself as a 'Model'."""
    model = combine(blocks, model_alias=True, **kwargs)
    return model
class CombinedBlock(Block, Parent, DAG):
    """A combined `Block` object comprised of several `Block` objects, which topologically sorts them and provides
    a set of partial and general equilibrium methods for evaluating their steady state, computes impulse responses,
    and calculates Jacobians along the DAG"""
    # To users: Do *not* manually change the attributes via assignment. Instantiating a
    # CombinedBlock has some automated features that are inferred from initial instantiation but not from
    # re-assignment of attributes post-instantiation.
    def __init__(self, blocks, name="", model_alias=False, sorted_indices=None, intermediate_inputs=None):
        super().__init__()
        # wrap any raw JacobianDicts so every member of the DAG is a proper Block
        blocks_unsorted = [b if isinstance(b, Block) else JacobianDictBlock(b) for b in blocks]
        DAG.__init__(self, blocks_unsorted)
        # TODO: deprecate this, use DAG methods instead
        self._required = find_intermediate_inputs(blocks) if intermediate_inputs is None else intermediate_inputs
        if not name:
            # default name: first-to-last block along the sorted DAG
            self.name = f"{self.blocks[0].name}_to_{self.blocks[-1].name}_combined"
        else:
            self.name = name
        # now that it has a name, do Parent initialization
        Parent.__init__(self, blocks)
        # If the create_model() is used instead of combine(), we will have __repr__ show this object as a 'Model'
        self._model_alias = model_alias
    def __repr__(self):
        if self._model_alias:
            return f"<Model '{self.name}'>"
        else:
            return f"<CombinedBlock '{self.name}'>"
    def _steady_state(self, calibration, dissolve, **kwargs):
        """Evaluate a partial equilibrium steady state of the CombinedBlock given a `calibration`"""
        ss = calibration.copy()
        for block in self.blocks:
            # TODO: make this inner_dissolve better, clumsy way to dispatch dissolve only to correct children
            inner_dissolve = [k for k in dissolve if self.descendants[k] == block.name]
            outputs = block.steady_state(ss, dissolve=inner_dissolve, **kwargs)
            ss.update(outputs)
        return ss
    def _impulse_nonlinear(self, ss, inputs, outputs, internals, Js, options, ss_initial):
        """Sweep blocks in topological order, feeding each block's nonlinear impulse to its successors."""
        original_outputs = outputs
        # intermediates must also be computed; vector-valued ss variables are excluded
        outputs = (outputs | self._required) - ss._vector_valued()
        impulses = inputs.copy()
        for block in self.blocks:
            input_args = {k: v for k, v in impulses.items() if k in block.inputs}
            if input_args or ss_initial is not None:
                # If this block is actually perturbed, or we start from different initial ss
                # TODO: be more selective about ss_initial here - did any inputs change that matter for this one block?
                impulses.update(block.impulse_nonlinear(ss, input_args, outputs & block.outputs, internals, Js, options, ss_initial))
        # return only the originally requested outputs
        return ImpulseDict({k: impulses.toplevel[k] for k in original_outputs if k in impulses.toplevel}, impulses.internals, impulses.T)
    def _impulse_linear(self, ss, inputs, outputs, Js, options):
        """Same forward sweep as _impulse_nonlinear, but using each block's linearized response."""
        original_outputs = outputs
        outputs = (outputs | self._required) - ss._vector_valued()
        impulses = inputs.copy()
        for block in self.blocks:
            input_args = {k: v for k, v in impulses.items() if k in block.inputs}
            if input_args:  # If this block is actually perturbed
                impulses.update(block.impulse_linear(ss, input_args, outputs & block.outputs, Js, options))
        return ImpulseDict({k: impulses.toplevel[k] for k in original_outputs if k in impulses.toplevel}, T=impulses.T)
    def _partial_jacobians(self, ss, inputs, outputs, T, Js, options):
        """Collect each child block's Jacobians with respect to the relevant inputs/outputs."""
        vector_valued = ss._vector_valued()
        inputs = (inputs | self._required) - vector_valued
        outputs = (outputs | self._required) - vector_valued
        curlyJs = {}
        for block in self.blocks:
            curlyJ = block.partial_jacobians(ss, inputs & block.inputs, outputs & block.outputs, T, Js, options)
            curlyJs.update(curlyJ)
        return curlyJs
    def _jacobian(self, ss, inputs, outputs, T, Js, options):
        """Accumulate block Jacobians forward along the DAG via the chain rule (J @ total_Js)."""
        Js = self._partial_jacobians(ss, inputs, outputs, T, Js, options)
        original_outputs = outputs
        total_Js = JacobianDict.identity(inputs)
        # TODO: horrible, redoing work from partial_jacobians, also need more efficient sifting of intermediates!
        vector_valued = ss._vector_valued()
        inputs = (inputs | self._required) - vector_valued
        outputs = (outputs | self._required) - vector_valued
        for block in self.blocks:
            if (inputs & block.inputs) and (outputs & block.outputs):
                J = block.jacobian(ss, inputs & block.inputs, outputs & block.outputs, T, Js, options)
                total_Js.update(J @ total_Js)
        return total_Js[original_outputs & total_Js.outputs, :]
# Useful type aliases
Model = CombinedBlock  # a 'Model' is just a CombinedBlock created via create_model()
| 5,540 | 44.04878 | 137 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/solved_block.py | from .block import Block
from .simple_block import simple
from .support.parent import Parent
from ..classes import FactoredJacobianDict
from ..utilities.ordered_set import OrderedSet
def solved(unknowns, targets, solver=None, solver_kwargs={}, name=""):
    """Convenience @solved(unknowns=..., targets=...) decorator on a single SimpleBlock.

    Parameters
    ----------
    unknowns : dict-like, endogenous variables solved internally to hit `targets`
    targets : list of str, equations of the wrapped block that must equal zero
    solver : str, optional steady-state solver passed through to SolvedBlock
    solver_kwargs : dict, extra keyword arguments for the solver
    name : str, optional name of the resulting SolvedBlock; defaults to the
        decorated function's __name__. (Fix: previously this argument was
        accepted but silently ignored.)
    """
    # call as decorator, return function of function
    def singleton_solved_block(f):
        # honor the `name` argument if provided, otherwise fall back to f.__name__
        block_name = name if name else f.__name__
        return SolvedBlock(simple(f).rename(f.__name__ + '_inner'), block_name, unknowns, targets, solver=solver, solver_kwargs=solver_kwargs)
    return singleton_solved_block
class SolvedBlock(Block, Parent):
    """SolvedBlocks are mini SHADE models embedded as blocks inside larger SHADE models.
    When creating them, we need to provide the basic ingredients of a SHADE model: the list of
    blocks comprising the model, the list on unknowns, and the list of targets.
    When we use .jac to ask for the Jacobian of a SolvedBlock, we are really solving for the 'G'
    matrices of the mini SHADE models, which then become the 'curlyJ' Jacobians of the block.
    Similarly, when we use .td to evaluate a SolvedBlock on a path, we are really solving for the
    nonlinear transition path such that all internal targets of the mini SHADE model are zero.
    """
    def __init__(self, block: Block, name, unknowns, targets, solver=None, solver_kwargs={}):
        super().__init__()
        # since we dispatch to solve methods, same set of options
        self.impulse_nonlinear_options = self.solve_impulse_nonlinear_options
        self.steady_state_options = self.solve_steady_state_options
        self.block = block
        self.name = name
        self.unknowns = unknowns
        self.targets = targets
        self.solver = solver
        self.solver_kwargs = solver_kwargs
        Parent.__init__(self, [self.block])
        # validate unknowns and targets
        if not len(unknowns) == len(targets):
            raise ValueError(f'Unknowns {set(unknowns)} and targets {set(targets)} different sizes in SolvedBlock {name}')
        if not set(unknowns) <= block.inputs:
            raise ValueError(f'Unknowns has element {set(unknowns) - block.inputs} not in inputs in SolvedBlock {name}')
        if not set(targets) <= block.outputs:
            raise ValueError(f'Targets has element {set(targets) - block.outputs} not in outputs in SolvedBlock {name}')
        # what are overall outputs and inputs?
        # unknowns are determined internally, so they become outputs rather than inputs
        self.outputs = block.outputs | set(unknowns)
        self.inputs = block.inputs - set(unknowns)
    def __repr__(self):
        return f"<SolvedBlock '{self.name}'>"
    def _steady_state(self, calibration, dissolve, options, **kwargs):
        # if this block is dissolved, take the unknowns from the calibration instead of solving
        if self.name in dissolve:
            kwargs['solver'] = "solved"
            unknowns = {k: v for k, v in calibration.items() if k in self.unknowns}
        else:
            unknowns = self.unknowns
        if 'solver' not in kwargs:
            # TODO: replace this with default option
            kwargs['solver'] = self.solver
        return self.block.solve_steady_state(calibration, unknowns, self.targets, options, **kwargs)
    def _impulse_nonlinear(self, ss, inputs, outputs, internals, Js, options, ss_initial, **kwargs):
        # delegate to the inner block's general-equilibrium solver
        return self.block.solve_impulse_nonlinear(ss, OrderedSet(self.unknowns), OrderedSet(self.targets),
                                                  inputs, outputs, internals, Js, options, self._get_H_U_factored(Js), ss_initial, **kwargs)
    def _impulse_linear(self, ss, inputs, outputs, Js, options):
        return self.block.solve_impulse_linear(ss, OrderedSet(self.unknowns), OrderedSet(self.targets),
                                               inputs, outputs, Js, options, self._get_H_U_factored(Js))
    def _jacobian(self, ss, inputs, outputs, T, Js, options):
        return self.block.solve_jacobian(ss, OrderedSet(self.unknowns), OrderedSet(self.targets),
                                         inputs, outputs, T, Js, options, self._get_H_U_factored(Js))[outputs]
    def _partial_jacobians(self, ss, inputs, outputs, T, Js, options):
        # call it on the child first
        inner_Js = self.block.partial_jacobians(ss, (OrderedSet(self.unknowns) | inputs),
                                                (OrderedSet(self.targets) | outputs - self.unknowns.keys()), T, Js, options)
        # with these inner Js, also compute H_U and factorize
        H_U = self.block.jacobian(ss, OrderedSet(self.unknowns), OrderedSet(self.targets), T, inner_Js, options)
        H_U_factored = FactoredJacobianDict(H_U, T)
        return {**inner_Js, self.name: H_U_factored}
    def _get_H_U_factored(self, Js):
        # reuse a caller-supplied factored H_U for this block, if any
        if self.name in Js and isinstance(Js[self.name], FactoredJacobianDict):
            return Js[self.name]
        else:
            return None
| 4,847 | 47.48 | 142 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/block.py | """Primitives to provide clarity and structure on blocks/models work"""
import numpy as np
from numbers import Real
from typing import Any, Dict, Union, Tuple, Optional, List
from copy import deepcopy
from .support.steady_state import provide_solver_default, solve_for_unknowns, compute_target_values
from .support.parent import Parent
from ..utilities import misc
from ..utilities.function import input_defaults
from ..utilities.bijection import Bijection
from ..utilities.ordered_set import OrderedSet
from ..classes import SteadyStateDict, UserProvidedSS, ImpulseDict, JacobianDict, FactoredJacobianDict
Array = Any
class Block:
    """The abstract base class for all `Block` objects."""
    def __init__(self):
        # M maps internal variable names to external ones (identity by default); see remap()
        self.M = Bijection({})
        # per-method default options, harvested from each subclass's private implementation signature
        self.steady_state_options = self.input_defaults_smart('_steady_state')
        self.impulse_nonlinear_options = self.input_defaults_smart('_impulse_nonlinear')
        self.impulse_linear_options = self.input_defaults_smart('_impulse_linear')
        self.jacobian_options = self.input_defaults_smart('_jacobian')
        self.partial_jacobians_options = self.input_defaults_smart('_partial_jacobians')
    # placeholders: concrete subclasses set inputs/outputs as attributes
    def inputs(self):
        pass
    def outputs(self):
        pass
    def steady_state(self, calibration: Union[SteadyStateDict, UserProvidedSS],
                     dissolve: List[str] = [], options: Dict[str, dict] = {}, **kwargs) -> SteadyStateDict:
        """Evaluate a partial equilibrium steady state of Block given a `calibration`."""
        inputs = self.inputs.copy()
        if isinstance(self, Parent):
            # dissolved children need their unknowns supplied directly in the calibration
            for k in dissolve:
                inputs |= self.get_attribute(k, 'unknowns').keys()
        calibration = SteadyStateDict(calibration)[inputs]
        own_options = self.get_options(options, kwargs, 'steady_state')
        if isinstance(self, Parent):
            return self.M @ self._steady_state(self.M.inv @ calibration, dissolve=dissolve,
                                               options=options, **own_options)
        else:
            return self.M @ self._steady_state(self.M.inv @ calibration, **own_options)
    def impulse_nonlinear(self, ss: SteadyStateDict, inputs: Union[Dict[str, Array], ImpulseDict],
                          outputs: Optional[List[str]] = None,
                          internals: Union[Dict[str, List[str]], List[str]] = {},
                          Js: Dict[str, JacobianDict] = {}, options: Dict[str, dict] = {},
                          ss_initial: Optional[SteadyStateDict] = None, **kwargs) -> ImpulseDict:
        """Calculate a partial equilibrium, non-linear impulse response of `outputs` to a set of shocks in `inputs`
        around a steady state `ss`."""
        own_options = self.get_options(options, kwargs, 'impulse_nonlinear')
        inputs = ImpulseDict(inputs)
        actual_outputs, inputs_as_outputs = self.process_outputs(ss,
            self.make_ordered_set(inputs), self.make_ordered_set(outputs))
        if isinstance(self, Parent):
            # SolvedBlocks may use Js and may be nested in a CombinedBlock, so we need to pass them down to any parent
            out = self.M @ self._impulse_nonlinear(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ actual_outputs, internals, Js, options, self.M.inv @ ss_initial, **own_options)
        elif hasattr(self, 'internals'):
            out = self.M @ self._impulse_nonlinear(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ actual_outputs, self.internals_to_report(internals), self.M.inv @ ss_initial, **own_options)
        else:
            out = self.M @ self._impulse_nonlinear(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ actual_outputs, self.M.inv @ ss_initial, **own_options)
        # shocked inputs that were also requested as outputs are passed through unchanged
        return inputs[inputs_as_outputs] | out
    def impulse_linear(self, ss: SteadyStateDict, inputs: Union[Dict[str, Array], ImpulseDict],
                       outputs: Optional[List[str]] = None, Js: Dict[str, JacobianDict] = {},
                       options: Dict[str, dict] = {}, **kwargs) -> ImpulseDict:
        """Calculate a partial equilibrium, linear impulse response of `outputs` to a set of shocks in `inputs`
        around a steady state `ss`."""
        own_options = self.get_options(options, kwargs, 'impulse_linear')
        inputs = ImpulseDict(inputs)
        actual_outputs, inputs_as_outputs = self.process_outputs(ss, self.make_ordered_set(inputs), self.make_ordered_set(outputs))
        if isinstance(self, Parent):
            out = self.M @ self._impulse_linear(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ actual_outputs, Js, options, **own_options)
        else:
            out = self.M @ self._impulse_linear(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ actual_outputs, Js, **own_options)
        return inputs[inputs_as_outputs] | out
    def partial_jacobians(self, ss: SteadyStateDict, inputs: Optional[List[str]] = None, outputs: Optional[List[str]] = None,
                          T: Optional[int] = None, Js: Dict[str, JacobianDict] = {}, options: Dict[str, dict] = {}, **kwargs):
        """Compute the Jacobians of this block (or its children) needed for later assembly, keyed by block name."""
        if inputs is None:
            inputs = self.inputs
        if outputs is None:
            outputs = self.outputs
        # if you have a J for this block that already has everything you need, use it
        # TODO: add check for T, maybe look at verify_saved_jacobian for ideas?
        if (self.name in Js) and isinstance(Js[self.name], JacobianDict) and (inputs <= Js[self.name].inputs) and (outputs <= Js[self.name].outputs):
            return {self.name: Js[self.name][outputs, inputs]}
        # if it's a leaf, just call Jacobian method, include if nonzero
        if not isinstance(self, Parent):
            own_options = self.get_options(options, kwargs, 'jacobian')
            jac = self.jacobian(ss, inputs, outputs, T, **own_options)
            return {self.name: jac} if jac else {}
        # otherwise call child method with remapping (and remap your own but none of the child Js)
        own_options = self.get_options(options, kwargs, 'partial_jacobians')
        partial = self._partial_jacobians(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ outputs, T, Js, options, **own_options)
        if self.name in partial:
            partial[self.name] = self.M @ partial[self.name]
        return partial
    def jacobian(self, ss: SteadyStateDict, inputs: List[str],
                 outputs: Optional[List[str]] = None,
                 T: Optional[int] = None, Js: Dict[str, JacobianDict] = {},
                 options: Dict[str, dict] = {}, **kwargs) -> JacobianDict:
        """Calculate a partial equilibrium Jacobian to a set of `input` shocks at a steady state `ss`."""
        own_options = self.get_options(options, kwargs, 'jacobian')
        inputs = self.make_ordered_set(inputs)
        outputs, _ = self.process_outputs(ss, {}, self.make_ordered_set(outputs))
        # if you have a J for this block that has everything you need, use it
        if (self.name in Js) and isinstance(Js[self.name], JacobianDict) and (inputs <= Js[self.name].inputs) and (outputs <= Js[self.name].outputs):
            return Js[self.name][outputs, inputs]
        # if it's a leaf, call Jacobian method, don't supply Js
        if not isinstance(self, Parent):
            return self.M @ self._jacobian(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ outputs, T, **own_options)
        # otherwise remap own J (currently needed for SolvedBlock only)
        Js = Js.copy()
        if self.name in Js:
            Js[self.name] = self.M.inv @ Js[self.name]
        return self.M @ self._jacobian(self.M.inv @ ss, self.M.inv @ inputs, self.M.inv @ outputs, T=T, Js=Js, options=options, **own_options)
    solve_steady_state_options = dict(solver="", solver_kwargs={}, ttol=1e-12, ctol=1e-9,
                                      verbose=False, constrained_method="linear_continuation", constrained_kwargs={})
    def solve_steady_state(self, calibration: Dict[str, Union[Real, Array]],
                           unknowns: Dict[str, Union[Real, Tuple[Real, Real]]],
                           targets: Union[Array, Dict[str, Union[str, Real]]],
                           dissolve: List = [], options: Dict[str, dict] = {}, **kwargs):
        """Evaluate a general equilibrium steady state of Block given a `calibration`
        and a set of `unknowns` and `targets` corresponding to the endogenous variables to be solved for and
        the target conditions that must hold in general equilibrium"""
        options = self.get_options(options, kwargs, 'solve_steady_state')
        ss = SteadyStateDict(calibration)
        solver = options['solver'] if options['solver'] else provide_solver_default(unknowns)
        def residual(unknown_values, unknowns_keys=unknowns.keys(), targets=targets):
            # mutates `ss` in place, so the last evaluation doubles as the returned solution
            ss.update(misc.smart_zip(unknowns_keys, unknown_values))
            ss.update(self.steady_state(ss, dissolve=dissolve, options=options, **kwargs))
            return compute_target_values(targets, ss)
        _ = solve_for_unknowns(residual, unknowns, solver, options['solver_kwargs'],
                               tol=options['ttol'], verbose=options['verbose'],
                               constrained_method=options['constrained_method'],
                               constrained_kwargs=options['constrained_kwargs'])
        return ss
    solve_impulse_nonlinear_options = dict(tol=1E-8, maxit=30, verbose=True)
    def solve_impulse_nonlinear(self, ss: SteadyStateDict, unknowns: List[str], targets: List[str],
                                inputs: Union[Dict[str, Array], ImpulseDict], outputs: Optional[List[str]] = None,
                                internals: Union[Dict[str, List[str]], List[str]] = {}, Js: Dict[str, JacobianDict] = {},
                                options: Dict[str, dict] = {}, H_U_factored: Optional[FactoredJacobianDict] = None,
                                ss_initial: Optional[SteadyStateDict] = None, **kwargs) -> ImpulseDict:
        """Calculate a general equilibrium, non-linear impulse response to a set of shocks in `inputs`
        around a steady state `ss`, given a set of `unknowns` and `targets` corresponding to the endogenous
        variables to be solved for and the `targets` that must hold in general equilibrium"""
        inputs = ImpulseDict(inputs)
        unknowns, targets = OrderedSet(unknowns), OrderedSet(targets)
        input_names = self.make_ordered_set(inputs)
        actual_outputs, inputs_as_outputs = self.process_outputs(ss, input_names | unknowns, self.make_ordered_set(outputs))
        T = inputs.T
        Js = self.partial_jacobians(ss, input_names | unknowns, (actual_outputs | targets) - unknowns, T, Js, options, **kwargs)
        if H_U_factored is None:
            H_U = self.jacobian(ss, unknowns, targets, T, Js, options, **kwargs)
            H_U_factored = FactoredJacobianDict(H_U, T)
        options = self.get_options(options, kwargs, 'solve_impulse_nonlinear')
        # Newton's method
        U = ImpulseDict({k: np.zeros(T) for k in unknowns})
        if options['verbose']:
            print(f'Solving {self.name} for {unknowns} to hit {targets}')
        for it in range(options['maxit']):
            results = self.impulse_nonlinear(ss, inputs | U, actual_outputs | targets, internals, Js, options, ss_initial, **kwargs)
            errors = {k: np.max(np.abs(results[k])) for k in targets}
            if options['verbose']:
                print(f'On iteration {it}')
                for k in errors:
                    print(f' max error for {k} is {errors[k]:.2E}')
            if all(v < options['tol'] for v in errors.values()):
                break
            else:
                # Newton step, using the (fixed) steady-state Jacobian factorization
                U += H_U_factored.apply(results)
        else:
            # for/else: only reached if the loop never hit `break`
            raise ValueError(f'No convergence after {options["maxit"]} backward iterations!')
        return (inputs | U)[inputs_as_outputs] | results
    solve_impulse_linear_options = {}
    def solve_impulse_linear(self, ss: SteadyStateDict, unknowns: List[str], targets: List[str],
                             inputs: Union[Dict[str, Array], ImpulseDict], outputs: Optional[List[str]] = None,
                             Js: Optional[Dict[str, JacobianDict]] = {}, options: Dict[str, dict] = {},
                             H_U_factored: Optional[FactoredJacobianDict] = None, **kwargs) -> ImpulseDict:
        """Calculate a general equilibrium, linear impulse response to a set of shocks in `inputs`
        around a steady state `ss`, given a set of `unknowns` and `targets` corresponding to the endogenous
        variables to be solved for and the target conditions that must hold in general equilibrium"""
        inputs = ImpulseDict(inputs)
        unknowns, targets = OrderedSet(unknowns), OrderedSet(targets)
        input_names = self.make_ordered_set(inputs)
        actual_outputs, inputs_as_outputs = self.process_outputs(ss, input_names | unknowns, self.make_ordered_set(outputs))
        T = inputs.T
        Js = self.partial_jacobians(ss, input_names | unknowns, (actual_outputs | targets) - unknowns, T, Js, options, **kwargs)
        dH = self.impulse_linear(ss, inputs, targets, Js, options, **kwargs).get(targets)  # .get(targets) fills in zeros
        if H_U_factored is None:
            H_U = self.jacobian(ss, unknowns, targets, T, Js, options, **kwargs).pack(T)
            dU = ImpulseDict.unpack(-np.linalg.solve(H_U, dH.pack()), unknowns, T)
        else:
            dU = H_U_factored @ dH
        return (inputs | dU)[inputs_as_outputs] | self.impulse_linear(ss, dU | inputs, actual_outputs, Js, options, **kwargs)
    solve_jacobian_options = {}
    def solve_jacobian(self, ss: SteadyStateDict, unknowns: List[str], targets: List[str],
                       inputs: List[str], outputs: Optional[List[str]] = None, T: int = 300,
                       Js: Dict[str, JacobianDict] = {}, options: Dict[str, dict] = {},
                       H_U_factored: Optional[FactoredJacobianDict] = None, **kwargs) -> JacobianDict:
        """Calculate a general equilibrium Jacobian to a set of `exogenous` shocks
        at a steady state `ss`, given a set of `unknowns` and `targets` corresponding to the endogenous
        variables to be solved for and the target conditions that must hold in general equilibrium"""
        inputs, unknowns = self.make_ordered_set(inputs), self.make_ordered_set(unknowns)
        actual_outputs, unknowns_as_outputs = self.process_outputs(ss, unknowns, self.make_ordered_set(outputs))
        Js = self.partial_jacobians(ss, inputs | unknowns, (actual_outputs | targets) - unknowns, T, Js, options, **kwargs)
        H_Z = self.jacobian(ss, inputs, targets, T, Js, options, **kwargs)
        if H_U_factored is None:
            H_U = self.jacobian(ss, unknowns, targets, T, Js, options, **kwargs).pack(T)
            U_Z = JacobianDict.unpack(-np.linalg.solve(H_U, H_Z.pack(T)), unknowns, inputs, T)
        else:
            U_Z = H_U_factored @ H_Z
        # local import to avoid a circular dependency at module load time
        from sequence_jacobian import combine
        self_with_unknowns = combine([U_Z, self])
        return self_with_unknowns.jacobian(ss, inputs, unknowns_as_outputs | actual_outputs, T, Js, options, **kwargs)
    def solved(self, unknowns, targets, name=None, solver=None, solver_kwargs=None):
        """Wrap this block in a SolvedBlock that internally solves `unknowns` to hit `targets`."""
        if name is None:
            name = self.name + "_solved"
        from .solved_block import SolvedBlock
        return SolvedBlock(self, name, unknowns, targets, solver, solver_kwargs)
    def remap(self, map: Dict[str, str]):
        """Return a copy of this block with variables renamed according to `map`."""
        other = deepcopy(self)
        other.M = self.M @ Bijection(map)
        other.inputs = other.M @ self.inputs
        other.outputs = other.M @ self.outputs
        # remap any optional subclass attributes that also hold variable names
        if hasattr(self, 'input_list'):
            other.input_list = other.M @ self.input_list
        if hasattr(self, 'output_list'):
            other.output_list = other.M @ self.output_list
        if hasattr(self, 'non_back_iter_outputs'):
            other.non_back_iter_outputs = other.M @ self.non_back_iter_outputs
        return other
    def rename(self, name: Optional[str] = None, suffix: Optional[str] = None):
        """Convention: specify suffix kwarg if called on Parent."""
        if isinstance(self, Parent):
            other = deepcopy(self)
            other.name = self.name + suffix
            if hasattr(self, 'blocks'):
                other.blocks = [b.rename(name, suffix) for b in self.blocks]
                Parent.__init__(other, other.blocks)
            elif hasattr(self, 'block'):
                other.block = self.block.rename_top(self.block.name + suffix)
                Parent.__init__(other, [other.block])
            return other
        else:
            if suffix is None:
                # called rename on singleton block
                return self.rename_top(name)
            else:
                # called rename on Parent, reached leaf
                return self.rename_top(self.name + suffix)
    def rename_top(self, name: str):
        """Rename only this block itself (children are untouched)."""
        other = deepcopy(self)
        other.name = name
        return other
    def default_inputs_outputs(self, ss: SteadyStateDict, inputs, outputs):
        # TODO: there should be checks to make sure you don't ask for multidimensional stuff for Jacobians?
        # should you be allowed to ask for it (even if not default) for impulses?
        if inputs is None:
            inputs = self.inputs
        if outputs is None:
            outputs = self.outputs - ss._vector_valued()
        return OrderedSet(inputs), OrderedSet(outputs)
    def process_outputs(self, ss, inputs: OrderedSet, outputs: Optional[OrderedSet]):
        # split requested outputs into those the block computes vs. shocked inputs passed through
        if outputs is None:
            actual_outputs = self.outputs - ss._vector_valued()
            inputs_as_outputs = inputs
        else:
            actual_outputs = outputs & self.outputs
            inputs_as_outputs = outputs & inputs
        return actual_outputs, inputs_as_outputs
    @staticmethod
    def make_ordered_set(x):
        """Coerce x to an OrderedSet, passing None through unchanged."""
        if x is not None and not isinstance(x, OrderedSet):
            return OrderedSet(x)
        else:
            return x
    def get_options(self, options: dict, kwargs, method):
        """Merge defaults, block-specific options[self.name], and call-site kwargs (in rising priority)."""
        own_options = getattr(self, method + "_options")
        if self.name in options:
            merged = {**own_options, **options[self.name], **kwargs}
        else:
            merged = {**own_options, **kwargs}
        # keep only keys this method actually recognizes
        return {k: merged[k] for k in own_options}
    def input_defaults_smart(self, methodname):
        # keyword-argument defaults of the private implementation, or {} if not implemented
        method = getattr(self, methodname, None)
        if method is None:
            return {}
        else:
            return input_defaults(method)
    def internals_to_report(self, internals):
        if self.name in internals:
            if isinstance(internals, dict):
                # if internals is a dict, we've specified which internals we want from each block
                return internals[self.name]
            else:
                # otherwise internals is some kind of iterable or set, and if we're in it, we want everything
                return self.internals
        else:
            return []
| 19,172 | 51.528767 | 194 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/stage_block.py | from typing import List, Optional
import numpy as np
import copy
from .block import Block
from .het_block import HetBlock
from ..classes import SteadyStateDict, JacobianDict, ImpulseDict
from ..utilities.ordered_set import OrderedSet
from ..utilities.function import ExtendedFunction, CombinedExtendedFunction
from ..utilities.bijection import Bijection
from ..utilities.optimized_routines import within_tolerance
from .. import utilities as utils
from .support.law_of_motion import LawOfMotion
from .support.stages import Stage
class StageBlock(Block):
    def __init__(self, stages: List[Stage], backward_init=None, hetinputs=None, name=None):
        super().__init__()
        inputs = OrderedSet([])
        outputs = OrderedSet([])
        stages = make_all_into_stages(stages)
        for i, stage in enumerate(stages):
            # external inputs are whatever you don't take from next stage
            # ((i+1) % len(stages) wraps: the last stage takes backward inputs from the first)
            inputs |= (stage.inputs - stages[(i+1) % len(stages)].backward_outputs)
            outputs |= stage.report
        # TODO: should have internals
        self.constructor_checks(stages, inputs, outputs)
        self.stages = stages
        self.inputs = inputs
        # aggregate outputs are upper-case versions of individual-level stage reports
        self.outputs = OrderedSet([o.upper() for o in outputs])
        self.M_outputs = Bijection({o: o.upper() for o in outputs})
        self.save_original()
        if name is None:
            name = stages[0].name + "_to_" + stages[-1].name
        self.name = name
        if hetinputs is not None:
            hetinputs = CombinedExtendedFunction(hetinputs)
        self.process_hetinputs(hetinputs, tocopy=False)
        if backward_init is not None:
            backward_init = ExtendedFunction(backward_init)
        self.backward_init = backward_init
    @staticmethod
    def constructor_checks(stages, inputs, outputs):
        """Validate that stage inputs, outputs, backward variables, and names don't collide."""
        # inputs, outputs, and combined backward should not overlap at all
        if not inputs.isdisjoint(outputs):
            raise ValueError(f'inputs and outputs have overlap {inputs & outputs}')
        backward_all = set().union(*(stage.backward_outputs for stage in stages))
        if not inputs.isdisjoint(backward_all):
            raise ValueError(f'Some stage taking another non-immediate-successor stage backward {inputs & backward_all} as input')
        if not outputs.isdisjoint(backward_all):
            raise ValueError(f'Outputs and backward have overlap {outputs & backward_all}')
        # 'D', 'law_of_motion' are protected names; outputs should not be upper case
        for stage in stages:
            if stage.name in ['D', 'law_of_motion']:
                raise ValueError(f"Stage '{stage.name}' has invalid name")
            for o in stage.report:
                if o in ['d', 'law_of_motion']:
                    raise ValueError(f"Stages are not allowed to return outputs called 'd' or 'law_of_motion' but stage '{stage.name}' does")
                if o.isupper():
                    raise ValueError(f"Stages are not allowed to report upper-case outputs. Stage '{stage.name}' has an output '{o}'")
    def __repr__(self):
        """Human-readable representation listing the names of all stages."""
        return f"<StageBlock '{self.name}' with stages {[k.name for k in self.stages]}>"
    def _steady_state(self, calibration, backward_tol=1E-9, backward_maxit=5000,
                      forward_tol=1E-10, forward_maxit=100_000):
        """Partial-equilibrium steady state: backward iteration, forward distribution iteration, then aggregation."""
        ss = self.extract_ss_dict(calibration)
        hetinputs = self.return_hetinputs(ss)
        ss.update(hetinputs)
        self.initialize_backward(ss)
        backward, report, lom = self.backward_steady_state(ss, backward_tol, backward_maxit)
        # get initialized distribution
        try:
            Dinit = ss[self.stages[0].name]['D']
        except KeyError:
            # assume that beginning-of-first-stage distribution is uniform, with
            # same dimensions as ANY backward input to final stage / backward output from first stage
            backward_last = backward[-1]
            backward_example = backward_last[list(backward_last)[0]]
            Dinit = np.full(backward_example.shape, 1/backward_example.size)
        D = self.forward_steady_state(Dinit, lom, forward_tol, forward_maxit)
        aggregates = {}
        # internals start with hetinputs, then stage-level internals are added per stage
        internals = hetinputs
        for i, stage in enumerate(self.stages):
            # aggregate everything to report
            for k in stage.report:
                aggregates[k.upper()] = np.vdot(D[i], report[i][k])
            # put individual-level report, end-of-stage backward, and beginning-of-stage dist in internals
            internals[stage.name] = {**backward[i], **report[i],
                                     'law_of_motion': lom[i], 'D': D[i]}
        # put all inputs to the block into aggregates
        for k in self.M.inv @ self.inputs:
            aggregates[k] = ss[k]
        return SteadyStateDict(aggregates, {self.name: internals})
    def _impulse_nonlinear(self, ssin, inputs, outputs, ss_initial):
        """Nonlinear impulse responses: given input paths (deviations from ss),
        run backward then forward iteration and return aggregate output paths as
        deviations from steady state. If 'ss_initial' is given, it overrides the
        period-0 beginning-of-first-stage distribution."""
        ss = self.extract_ss_dict(ssin)
        if ss_initial is not None:
            ss[self.stages[0].name]['D'] = ss_initial[self.name][self.stages[0].name]['D']
        # report_path is dict(stage: {output: TxN-dim array})
        # lom_path is list[t][stage] in chronological order
        report_path, lom_path = self.backward_nonlinear(ss, inputs)
        # D_path is dict(stage: TxN-dim array)
        D_path = self.forward_nonlinear(ss, lom_path)
        aggregates = {}
        for stage in self.stages:
            for o in stage.report:
                # only aggregate outputs the caller asked for (after remapping)
                if self.M_outputs @ o in outputs:
                    aggregates[self.M_outputs @ o] = utils.optimized_routines.fast_aggregate(D_path[stage.name], report_path[stage.name][o])
        return ImpulseDict(aggregates, T=inputs.T) - ssin
    def _impulse_linear(self, ss, inputs, outputs, Js):
        """Linearized impulse responses: apply this block's Jacobians to the
        input paths. Note: 'Js' (precomputed Jacobians) is accepted but not
        forwarded to _jacobian here — TODO confirm this is intentional."""
        return ImpulseDict(self._jacobian(ss, list(inputs.keys()), outputs, inputs.T).apply(inputs))
def _jacobian(self, ss, inputs, outputs, T):
ss = self.extract_ss_dict(ss)
outputs = self.M_outputs.inv @ outputs
differentiable_hetinput = self.preliminary_hetinput(ss, h=1E-4)
backward_data, forward_data, expectations_data = self.preliminary_all_stages(ss)
# step 1
curlyYs, curlyDs = {}, {}
for i in inputs:
curlyYs[i], curlyDs[i] = self.backward_fakenews(i, outputs, T, backward_data, forward_data, differentiable_hetinput)
# step 2
curlyEs = {}
for o in outputs:
curlyEs[o] = self.expectation_vectors(o, T-1, expectations_data)
# steps 3-4
F, J = {}, {}
for o in outputs:
for i in inputs:
if o.upper() not in F:
F[o.upper()] = {}
if o.upper() not in J:
J[o.upper()] = {}
F[o.upper()][i] = HetBlock.build_F(curlyYs[i][o], curlyDs[i], curlyEs[o])
J[o.upper()][i] = HetBlock.J_from_F(F[o.upper()][i])
return JacobianDict(J, name=self.name, T=T)
'''Steady-state backward and forward methods'''
    def backward_steady_state(self, ss, tol=1E-9, maxit=5000):
        """Iterate backward through all stages until the first stage's backward
        outputs converge; then do one more pass to also collect reports and laws
        of motion. Returns (backward inputs, reports, laws of motion) per stage."""
        # 'backward' will be dict with backward output of first stage
        # (i.e. input to last stage) from the most recent time iteration
        # initializer for first iteration should be in 'ss'
        backward = {k: ss[k] for k in self.stages[0].backward_outputs}
        # iterate until end-of-final-stage backward inputs converge
        for it in range(maxit):
            backward_new = self.backward_step_steady_state(backward, ss)
            # convergence is only checked every 10 iterations, for efficiency
            if it % 10 == 0 and all(within_tolerance(backward_new[k], backward[k], tol) for k in backward):
                break
            backward = backward_new
        else:
            raise ValueError(f'No convergence after {maxit} backward iterations!')
        # one more iteration to get backward INPUTS, reported outputs, and law of motion for all stages
        return self.backward_step_nonlinear(backward, ss)[:3]
def backward_step_steady_state(self, backward, inputs):
"""Iterate backward through all stages for a single period, ignoring reported outputs"""
for stage in reversed(self.stages):
backward, _ = stage.backward_step_separate({**inputs, **backward})
return backward
    def backward_step_nonlinear(self, backward, inputs):
        """Single full backward pass through all stages, also collecting
        reported outputs and laws of motion for each stage."""
        # append backward INPUT to final stage
        backward_all = [backward]
        report_all = []
        lom_all = []
        for stage in reversed(self.stages):
            (backward, report), lom = stage.backward_step_separate({**inputs, **backward}, lawofmotion=True, hetoutputs=True)
            # append backward OUTPUT, reported outputs, and law of motion for each stage, in reverse chronological order
            backward_all.append(backward)
            report_all.append(report)
            lom_all.append(lom)
        # return backward INPUT, report, and lom for each stage, with stages now in chronological order
        # (to get backward inputs, skip first chronological entry of backward_all, which is backward output of first stage,
        # return that entry separately as the fourth output of this function)
        return backward_all[::-1][1:], report_all[::-1], lom_all[::-1], backward_all[-1]
    def forward_steady_state(self, D, lom: List[LawOfMotion], tol=1E-10, maxit=100_000):
        """Find steady-state beginning-of-stage distributions for all stages"""
        # iterate until beginning-of-stage distribution for first stage converges
        for it in range(maxit):
            D_new = self.forward_step_steady_state(D, lom)
            # convergence only checked every 10 iterations, for efficiency
            if it % 10 == 0 and within_tolerance(D, D_new, tol):
                break
            D = D_new
        else:
            raise ValueError(f'No convergence after {maxit} forward iterations!')
        # one more iteration to get beginning-of-stage in *all* stages
        return self.forward_step_nonlinear(D, lom)[0]
def forward_step_steady_state(self, D, loms: List[LawOfMotion]):
"""Given beginning-of-first-stage distribution, apply laws of motion in 'loms'
for each stage to get end-of-final-stage distribution, which is returned"""
for lom in loms:
D = lom @ D
return D
def forward_step_nonlinear(self, D, loms: List[LawOfMotion]):
Ds = [D]
for i, lom in enumerate(loms):
Ds.append(lom @ Ds[i])
# return all beginning-of-stage Ds this period, then beginning-of-period next period
return Ds[:-1], Ds[-1]
'''Nonlinear backward and forward methods'''
    def backward_nonlinear(self, ss, inputs):
        """Backward iteration along a transition path: starting from steady-state
        backward inputs at the terminal date, iterate back through time collecting
        each stage's reported outputs and laws of motion at every t."""
        indict = ss.copy()
        T = inputs.T
        # populate backward with steady-state backward inputs to final stage (stored under final stage in ss dict)
        backward = {k: ss[self.stages[-1].name][k] for k in self.stages[0].backward_outputs}
        # report_path is dict(stage: {output: TxN-dim array})
        report_path = {stage.name: {o: np.empty((T,) + ss[stage.name][o].shape) for o in stage.report} for stage in self.stages}
        lom_path = []
        for t in reversed(range(T)):
            indict.update({k: ss[k] + v[t, ...] for k, v in inputs.items()})
            hetinputs = self.return_hetinputs(indict)
            indict.update(hetinputs)
            # get reports and lom from each stage, backward output of first stage (to feed into next iteration)
            _, report, lom, backward = self.backward_step_nonlinear(backward, indict)
            for j, stage in enumerate(self.stages):
                for o in stage.report:
                    report_path[stage.name][o][t, ...] = report[j][o]
            lom_path.append(lom)
        # loms were appended in reverse time order; flip to chronological
        return report_path, lom_path[::-1]
    def forward_nonlinear(self, ss, lom_path):
        """Forward iteration along a transition path: starting from the steady-state
        beginning-of-first-stage distribution, apply each period's laws of motion and
        record beginning-of-stage distributions (dict of stage name -> TxN array)."""
        T = len(lom_path)
        Dbeg = ss[self.stages[0].name]['D']
        D_path = {stage.name: np.empty((T,) + ss[stage.name]['D'].shape) for stage in self.stages}
        for t in range(T):
            # iterate forward from beginning-of-first-stage distribution in Dbeg to get
            # (1) beginning-of-stage distributions for all stages (in D)
            # (2) end-of-final-stage distribution, used for next period's beginning-of-first-stage dist (in Dbeg)
            D, Dbeg = self.forward_step_nonlinear(Dbeg, lom_path[t])
            for j, stage in enumerate(self.stages):
                D_path[stage.name][t, ...] = D[j]
        return D_path
'''Jacobian calculation: four parts of fake news algorithm, plus support methods'''
    def backward_fakenews(self, input_shocked, output_list, T, backward_data, forward_data, differentiable_hetinput):
        """Part 1 of the fake news algorithm: responses (curlyY per output, and
        curlyD) to a unit 'news shock' to 'input_shocked' anticipated s=0..T-1
        periods in advance."""
        din_dict = {input_shocked: 1}
        # if the shocked input feeds through hetinputs, propagate it
        if differentiable_hetinput is not None and input_shocked in differentiable_hetinput.inputs:
            din_dict.update(differentiable_hetinput.diff(din_dict))
        curlyV, curlyD, curlyY = self.backward_step_fakenews(din_dict, output_list, backward_data, forward_data)
        # infer dimensions from this, initialize empty arrays, and fill in contemporaneous effect
        curlyDs = np.empty((T,) + curlyD.shape)
        curlyYs = {k: np.empty(T) for k in curlyY.keys()}
        curlyDs[0, ...] = curlyD
        for k in curlyY.keys():
            curlyYs[k][0] = curlyY[k]
        # fill in anticipation effects of shock up to horizon T
        for t in range(1, T):
            curlyV, curlyDs[t, ...], curlyY = self.backward_step_fakenews(curlyV, output_list, backward_data, forward_data)
            for k in curlyY.keys():
                curlyYs[k][t] = curlyY[k]
        return curlyYs, curlyDs
    def backward_step_fakenews(self, din_dict, output_list, backward_data, forward_data):
        """Given shocks to this period's inputs in 'din_dict', calculate perturbation to
        first-stage backward outputs (curlyV), to final-stage end-of-stage distribution (curlyD),
        and to any aggregate outputs that are in 'output_list' (curlyY)"""
        dback = {} # perturbations to backward outputs from most recent stage
        dloms = [] # list of perturbations to law of motion from all stages (initially in reverse order)
        curlyY = {} # perturbations to aggregate outputs
        # go backward through stages, pick up shocks to law of motion
        # and also the part of curlyY not coming through the distribution
        for stage, ss, D, lom, precomp, hetoutputs in backward_data:
            din_all = {**din_dict, **dback}
            dout, dlom = stage.backward_step_shock(ss, din_all, precomp)
            dloms.append(dlom)
            dback = {k: dout[k] for k in stage.backward_outputs}
            # hetoutputs may add perturbed outputs that depend on the stage's outputs
            if hetoutputs is not None and output_list & hetoutputs.outputs:
                din_all.update(dout)
                dout.update(hetoutputs.diff(din_all, outputs=output_list & hetoutputs.outputs))
            # if policy is perturbed for k in output_list, add this to curlyY
            # (effect of perturbed distribution is added separately below)
            for k in stage.report:
                if k in output_list:
                    if k in dout.keys():
                        curlyY[k] = np.vdot(D, dout[k])
                    else:
                        curlyY[k] = 0
        curlyV = dback
        # forward through stages, accumulate to find perturbation to D
        dD = None
        for (stage, ss, D, lom), dlom in zip(forward_data, dloms[::-1]):
            # if dD is not None, add consequences for curlyY
            if dD is not None:
                for k in stage.report:
                    if k in output_list:
                        if k in curlyY:
                            curlyY[k] += np.vdot(dD, ss[k])
                        else:
                            curlyY[k] = np.vdot(dD, ss[k])
            # advance the dD to next stage
            if dD is not None:
                dD = lom @ dD
                if dlom is not None:
                    dD += dlom @ D
            elif dlom is not None:
                dD = dlom @ D
        curlyD = dD
        # NOTE(review): curlyD is None when no stage's law of motion was perturbed
        return curlyV, curlyD, curlyY
    def expectation_vectors(self, o, T, expectations_data):
        """Expectation vector giving expected value of output o, from any stage,
        T periods from now, at the beginning of the first stage
        (demeaned for numerical reasons, which doesn't affect product with curlyD)."""
        curlyE0 = self.expectations_beginning_of_period(o, expectations_data)
        curlyEs = np.empty((T,) + curlyE0.shape)
        curlyEs[0] = utils.misc.demean(curlyE0)
        # each additional period pushes the expectation back one full period
        for t in range(1, T):
            curlyEs[t] = utils.misc.demean(
                self.expectation_step_fakenews(curlyEs[t-1], expectations_data))
        return curlyEs
    def expectations_beginning_of_period(self, o, expectations_data):
        """Find expected value of output o, this period, at beginning of first stage.

        Walks back through the stages (expectations_data is in reverse-chronological
        order): once the stage reporting o is found, keep taking expectations through
        each earlier stage's transposed law of motion."""
        cur_exp = None
        for ss_report, lom_T in expectations_data:
            # if we've already passed variable, take expectations
            if cur_exp is not None:
                cur_exp = lom_T @ cur_exp
            # see if variable this period
            if o in ss_report:
                cur_exp = ss_report[o]
        # NOTE(review): returns None if no stage reports o
        return cur_exp
def expectation_step_fakenews(self, cur_exp, expectations_data):
for _, lom_T in expectations_data:
cur_exp = lom_T @ cur_exp
return cur_exp
'''Preliminary processing'''
def preliminary_all_stages(self, ss):
"""Create lists of tuples with steady-state information for backward, forward, and
expectations iterations, each list going in the same time direction as the relevant iteration"""
# TODO: to make code more intelligible, this should be made object-oriented
backward_data = []
forward_data = []
expectations_data = []
for stage in reversed(self.stages):
potential_inputs = {**ss[stage.name], **ss}
input = {k: potential_inputs[k] for k in stage.inputs}
report = {k: ss[stage.name][k] for k in stage.report}
D = ss[stage.name]['D']
lom = ss[stage.name]['law_of_motion']
precomputed = stage.precompute(input, lom)
hetoutputs = None
if stage.hetoutputs is not None:
hetoutputs_inputs = {k: potential_inputs[k] for k in stage.hetoutputs.inputs}
hetoutputs = stage.hetoutputs.differentiable(hetoutputs_inputs)
backward_data.append((stage, input, D, lom, precomputed, hetoutputs))
forward_data.append((stage, report, D, lom))
expectations_data.append((report, lom.T))
return backward_data, forward_data[::-1], expectations_data
def preliminary_hetinput(self, ss, h):
differentiable_hetinputs = None
if self.hetinputs is not None:
# always use two-sided differentiation for hetinputs
differentiable_hetinputs = self.hetinputs.differentiable(ss, h, True)
return differentiable_hetinputs
'''HetInput and HetOutput options and processing'''
    def extract_ss_dict(self, ss):
        """Flatten ss dict and internals for this block (if present) into one dict,
        but keeping each stage within internals as a subdict"""
        if isinstance(ss, SteadyStateDict):
            ssnew = ss.toplevel.copy()
            if self.name in ss.internals:
                ssnew.update(ss.internals[self.name])
            return ssnew
        else:
            # plain dict: just return a shallow copy
            return ss.copy()
    def initialize_backward(self, ss):
        """if not all backward outputs of first stage (i.e. backward inputs
        of final stage) are already in dict, call backward_init to generate them"""
        # could generalize to allow backward_init to start us at different stage?
        if not all(k in ss for k in self.stages[0].backward_outputs):
            ss.update(self.backward_init(ss))
def next_stage(self, i):
return self.stages[(i+1) % len(self.stages)]
    def process_hetinputs(self, hetinputs: Optional[CombinedExtendedFunction], tocopy=True):
        """Attach (or replace) hetinputs on this block, recomputing the block's
        input set: hetinput inputs are added, hetinput outputs removed (they are
        produced internally). Returns a (by default copied) block."""
        if tocopy:
            self = copy.copy(self)
        inputs = self.original_inputs.copy()
        #internals = self.original_internals.copy()
        if hetinputs is not None:
            inputs |= hetinputs.inputs
            inputs -= hetinputs.outputs
            #internals |= hetinputs.outputs
        self.inputs = inputs
        #self.internals = internals
        self.hetinputs = hetinputs
        # TODO: fix consequences with remap, as in het_block.py
        return self
    def add_hetinputs(self, functions):
        """Return a copy of this block with 'functions' added as hetinputs."""
        if self.hetinputs is None:
            return self.process_hetinputs(CombinedExtendedFunction(functions))
        else:
            # merge with existing hetinputs
            return self.process_hetinputs(self.hetinputs.add(functions))
    def remove_hetinputs(self, names):
        """Return a copy of this block with the named hetinputs removed."""
        return self.process_hetinputs(self.hetinputs.remove(names))
def return_hetinputs(self, d):
if self.hetinputs is not None:
return self.hetinputs(d)
else:
return {}
    def save_original(self):
        """store "original" copies of these for use whenever we process new hetinputs/hetoutputs"""
        # these are rebound (not mutated), so plain references suffice here
        self.original_inputs = self.inputs
        self.original_outputs = self.outputs
        # self.original_internals = self.internals
        self.original_M_outputs = self.M_outputs
'''Flexible expectation vectors'''
# TODO: this is wrong; can we make something like this work?
# def preliminary_expectations(self, ss, loms=None):
# """allow for arbitrary loms, not the ones from ss; useful for counterfactuals"""
# # loms is Dict[stage.name: lom] in forward order
# expectations_data = []
# for stage in reversed(self.stages):
# report = {k: ss[stage.name][k] for k in stage.report}
# if loms is None:
# lom = ss[stage.name]['law_of_motion']
# else:
# lom = loms[stage.name]
# expectations_data.append((report, lom.T))
# return expectations_data
    def expectation_vectors_level(self, o, T, expectations_data):
        """Like expectation_vectors, but without demeaning: expected value in
        levels of output o, t=0..T-1 periods ahead, at beginning of first stage."""
        curlyE0 = self.expectations_beginning_of_period(o, expectations_data)
        curlyEs = np.empty((T,) + curlyE0.shape)
        curlyEs[0] = curlyE0
        for t in range(1, T):
            curlyEs[t] = self.expectation_step_fakenews(curlyEs[t-1], expectations_data)
        return curlyEs
    def preliminary_expectations(self, ssin):
        """Build expectations_data — (report dict, transposed law of motion) per
        stage, in reverse-chronological order — from a steady state."""
        ss = self.extract_ss_dict(ssin)
        expectations_data = []
        for stage in reversed(self.stages):
            report = {k: ss[stage.name][k] for k in stage.report}
            lom = ss[stage.name]['law_of_motion']
            expectations_data.append((report, lom.T))
        return expectations_data
def make_all_into_stages(stages: List[Stage]):
    """Given list of 'stages' that can include either actual stages or
    objects with a .make_stage(next_stage_backward) method, turn all into stages.
    Since .make_stage() requires the backward outputs from the next stage,
    we need to find an actual stage to start with, which makes this a little harder."""
    # copy since we'll overwrite
    stages = list(stages)
    # find first that is a stage (for now, an endogenous transition) already
    for i, stage in enumerate(stages):
        if isinstance(stage, Stage):
            ifirst = i
            break
    else:
        raise ValueError('No full-fledged stages supplied to constructor.')
    # iterate backward from there, so that everything before ifirst is a stage
    for i in range(ifirst-1, -1, -1):
        if not isinstance(stages[i], Stage):
            stages[i] = stages[i].make_stage(stages[i+1].backward_outputs)
    # now iterate backward from the end
    # (the modulus wraps the final stage around to stage 0, which is a Stage by now)
    for i in range(len(stages)-1, ifirst, -1):
        if not isinstance(stages[i], Stage):
            stages[i] = stages[i].make_stage(stages[(i+1)%len(stages)].backward_outputs)
    return stages
| 24,305 | 42.481216 | 141 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/simple_block.py | """Class definition of a simple block"""
import numpy as np
from copy import deepcopy
from .support.simple_displacement import ignore, Displace, AccumulatedDerivative
from .block import Block
from ..classes import SteadyStateDict, ImpulseDict, JacobianDict, SimpleSparse
from ..utilities import misc
from ..utilities.function import ExtendedFunction
'''Part 1: SimpleBlock class and @simple decorator to generate it'''
def simple(f):
    """Decorator: wrap a function written in Dynare-ish style as a SimpleBlock."""
    return SimpleBlock(f)
class SimpleBlock(Block):
    """Generated from simple block written in Dynare-ish style and decorated with @simple, e.g.
    @simple
    def production(Z, K, L, alpha):
        Y = Z * K(-1) ** alpha * L ** (1 - alpha)
        return Y
    which is a SimpleBlock that takes in Z, K, L, and alpha, all of which can be either constants
    or series, and implements a Cobb-Douglas production function, noting that for production today
    we use the capital K(-1) determined yesterday.
    Key methods are .ss, .td, and .jac, like HetBlock.
    """
    def __init__(self, f):
        super().__init__()
        self.f = ExtendedFunction(f)
        # block name and I/O are inferred from the wrapped function
        self.name = self.f.name
        self.inputs = self.f.inputs
        self.outputs = self.f.outputs
    def __repr__(self):
        return f"<SimpleBlock '{self.name}'>"
    def _steady_state(self, ss):
        """Evaluate the block at constant values; returns inputs plus outputs."""
        outputs = self.f.wrapped_call(ss, preprocess=ignore, postprocess=misc.numeric_primitive)
        return SteadyStateDict({**ss, **outputs})
    def _impulse_nonlinear(self, ss, inputs, outputs, ss_initial):
        """Nonlinear impulse responses: evaluate the block along time paths,
        returning deviations from steady state. 'ss_initial', if given, supplies
        a distinct pre-period-0 steady state for lagged variables."""
        if ss_initial is None:
            ss_initial = ss
            ss_initial_flag = False
        else:
            ss_initial_flag = True
        input_args = {}
        for k, v in inputs.items():
            if np.isscalar(v):
                raise ValueError(f'Keyword argument {k}={v} is scalar, should be time path.')
            # Displace tracks the path, its ss level, and its pre-period-0 level
            input_args[k] = Displace(v + ss[k], ss[k], ss_initial[k], k)
        for k in self.inputs:
            if k not in input_args:
                # unshocked inputs stay at ss, unless their initial ss differs,
                # in which case they must be full paths so lags pick up ss_initial
                if not ss_initial_flag or (ss_initial_flag and np.array_equal(ss_initial[k], ss[k])):
                    input_args[k] = ignore(ss[k])
                else:
                    input_args[k] = Displace(np.full(inputs.T, ss[k]), ss[k], ss_initial[k], k)
        return ImpulseDict(make_impulse_uniform_length(self.f(input_args)))[outputs] - ss
    def _impulse_linear(self, ss, inputs, outputs, Js):
        """Linearized impulse responses: apply this block's Jacobians to the input paths."""
        return ImpulseDict(self.jacobian(ss, list(inputs.keys()), outputs, inputs.T, Js).apply(inputs))
    def _jacobian(self, ss, inputs, outputs, T):
        """Jacobians of outputs with respect to inputs, via symbolic differentiation
        of the wrapped function (AccumulatedDerivative)."""
        invertedJ = {i: {} for i in inputs}
        # Loop over all inputs/shocks which we want to differentiate with respect to
        for i in inputs:
            invertedJ[i] = self.compute_single_shock_J(ss, i)
        # Because we computed the Jacobian of all outputs with respect to each shock (invertedJ[i][o]),
        # we need to loop back through to have J[o][i] to map for a given output `o`, shock `i`,
        # the Jacobian curlyJ^{o,i}.
        J = {o: {} for o in outputs}
        for o in outputs:
            for i in inputs:
                # drop zeros from JacobianDict
                if invertedJ[i][o] and not invertedJ[i][o].iszero:
                    J[o][i] = invertedJ[i][o]
        return JacobianDict(J, outputs, inputs, self.name, T)
    def compute_single_shock_J(self, ss, i):
        """Jacobians of every output with respect to the single input 'i'."""
        # bug-prone shadowing fixed: comprehension variable renamed from 'i'
        # (it used to shadow the parameter inside the comprehension)
        input_args = {k: ignore(ss[k]) for k in self.inputs}
        input_args[i] = AccumulatedDerivative(f_value=ss[i])
        J = {o: {} for o in self.outputs}
        for o_name, o in self.f(input_args).items():
            if isinstance(o, AccumulatedDerivative):
                J[o_name] = SimpleSparse(o.elements)
        return J
# TODO: move this to impulse.py?
def make_impulse_uniform_length(out):
    """Pad scalar entries of 'out' to full-length arrays so every impulse path
    shares the same horizon T (the longest length among the values)."""
    T = np.max([np.size(v) for v in out.values()])
    padded = {}
    for key, val in out.items():
        prim = misc.numeric_primitive(val)
        padded[key] = np.full(T, prim) if np.isscalar(val) else prim
    return padded
| 4,046 | 36.12844 | 103 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/het_block.py | import copy
import numpy as np
from typing import Optional, Dict
from .block import Block
from .. import utilities as utils
from ..classes import SteadyStateDict, ImpulseDict, JacobianDict
from ..utilities.function import ExtendedFunction, CombinedExtendedFunction
from ..utilities.ordered_set import OrderedSet
from ..utilities.bijection import Bijection
from .support.het_support import ForwardShockableTransition, ExpectationShockableTransition, lottery_1d, lottery_2d, Markov, CombinedTransition, Transition
def het(exogenous, policy, backward, backward_init=None, hetinputs=None, hetoutputs=None):
    """Decorator factory: wrap a backward-step function as a HetBlock with the
    given exogenous processes, policies, and backward variables."""
    def decorator(backward_fun):
        return HetBlock(backward_fun, exogenous, policy, backward, backward_init, hetinputs, hetoutputs)
    return decorator
class HetBlock(Block):
    def __init__(self, backward_fun, exogenous, policy, backward, backward_init=None, hetinputs=None, hetoutputs=None):
        """Construct a HetBlock around 'backward_fun'.

        exogenous: name(s) of exogenous Markov processes
        policy: name(s) of endogenous policies (at most two)
        backward: name(s) of backward variables ('x' expects input 'x_p')
        backward_init: optional initializer for backward variables
        hetinputs/hetoutputs: optional heterogeneous input/output functions
        """
        self.backward_fun = ExtendedFunction(backward_fun)
        self.name = self.backward_fun.name
        super().__init__()
        self.exogenous = OrderedSet(utils.misc.make_tuple(exogenous))
        self.policy, self.backward = (OrderedSet(utils.misc.make_tuple(x)) for x in (policy, backward))
        # outputs other than backward variables get aggregated and uppercased
        self.non_backward_outputs = self.backward_fun.outputs - self.backward
        self.outputs = OrderedSet([o.upper() for o in self.non_backward_outputs])
        self.M_outputs = Bijection({o: o.upper() for o in self.non_backward_outputs})
        # '_p' (prime) arguments are supplied internally, not by callers
        self.inputs = self.backward_fun.inputs - [k + '_p' for k in self.backward]
        self.inputs |= self.exogenous
        self.internals = OrderedSet(['D', 'Dbeg']) | self.exogenous | self.backward_fun.outputs
        self.static_checks()
        # store "original" copies of these for use whenever we process new hetinputs/hetoutputs
        self.original_inputs = self.inputs
        self.original_outputs = self.outputs
        self.original_internals = self.internals
        self.original_M_outputs = self.M_outputs
        # A HetBlock can have heterogeneous inputs and heterogeneous outputs, henceforth `hetinput` and `hetoutput`.
        if hetinputs is not None:
            hetinputs = CombinedExtendedFunction(hetinputs)
        if hetoutputs is not None:
            hetoutputs = CombinedExtendedFunction(hetoutputs)
        self.process_hetinputs_hetoutputs(hetinputs, hetoutputs, tocopy=False)
        if backward_init is not None:
            backward_init = ExtendedFunction(backward_init)
        self.backward_init = backward_init
def static_checks(self):
if len(self.policy) > 2:
raise ValueError(f"More than two endogenous policies in {self.name}, not yet supported")
for pol in self.policy:
if pol not in self.backward_fun.outputs:
raise ValueError(f"Policy '{pol}' not included as output in {self.name}")
if pol[0].isupper():
raise ValueError(f"Policy '{pol}' is uppercase in {self.name}, which is not allowed")
for back in self.backward:
if back + '_p' not in self.backward_fun.inputs:
raise ValueError(f"Backward variable '{back}_p' not included as argument in {self.name}")
if back not in self.backward_fun.outputs:
raise ValueError(f"Backward variable '{back}' not included as output in {self.name}")
if back in ['d', 'dbeg', 'D', 'Dbeg']:
raise ValueError(f"A backward variable is called D or Dbeg, which are reserved for the distribution.")
for out in self.non_backward_outputs:
if out[0].isupper():
raise ValueError("Output '{out}' is uppercase in {self.name}, which is not allowed")
def __repr__(self):
"""Nice string representation of HetBlock for printing to console"""
if self.hetinputs is not None:
if self.hetoutputs is not None:
return f"<HetBlock '{self.name}' with hetinput '{self.hetinputs.name}'" \
f" and with hetoutput `{self.hetoutputs.name}'>"
else:
return f"<HetBlock '{self.name}' with hetinput '{self.hetinputs.name}'>"
else:
return f"<HetBlock '{self.name}'>"
    def _steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000,
                      forward_tol=1E-10, forward_maxit=100_000):
        """Steady state of the block: backward-iterate policies to convergence,
        forward-iterate the distribution to convergence, then aggregate. Returns
        a SteadyStateDict with internals stored under this block's name."""
        ss = self.extract_ss_dict(calibration)
        self.update_with_hetinputs(ss)
        self.initialize_backward(ss)
        ss = self.backward_steady_state(ss, tol=backward_tol, maxit=backward_maxit)
        Dbeg, D = self.forward_steady_state(ss, forward_tol, forward_maxit)
        ss.update({'Dbeg': Dbeg, "D": D})
        self.update_with_hetoutputs(ss)
        # aggregate all outputs other than backward variables on grid, capitalize
        toreturn = self.non_backward_outputs
        if self.hetoutputs is not None:
            toreturn = toreturn | self.hetoutputs.outputs
        aggregates = {o.upper(): np.vdot(D, ss[o]) for o in toreturn}
        ss.update(aggregates)
        return SteadyStateDict({k: ss[k] for k in ss if k not in self.internals},
                               {self.name: {k: ss[k] for k in ss if k in self.internals}})
    def _impulse_nonlinear(self, ssin, inputs, outputs, internals, ss_initial, monotonic=False):
        """Nonlinear impulse responses: backward iteration for individual paths,
        forward iteration for the distribution, then aggregation; returned as
        deviations from steady state, with requested internals attached."""
        ss = self.extract_ss_dict(ssin)
        if ss_initial is not None:
            # only effect of distinct initial ss on hetblock is different initial distribution
            ss['Dbeg'] = ss_initial.internals[self.name]['Dbeg']
        # identify individual variable paths we want from backward iteration, then run it
        toreturn = self.non_backward_outputs
        if self.hetoutputs is not None:
            toreturn = toreturn | self.hetoutputs.outputs
        toreturn = (toreturn | internals) - ['D', 'Dbeg']
        individual_paths, exog_path = self.backward_nonlinear(ss, inputs, toreturn)
        # run forward iteration to get path of distribution, add to individual_paths
        self.forward_nonlinear(ss, individual_paths, exog_path, monotonic)
        # obtain aggregates of all outputs, made uppercase
        aggregates = {o: utils.optimized_routines.fast_aggregate(
            individual_paths['D'], individual_paths[self.M_outputs.inv @ o]) for o in outputs}
        # obtain internals
        internals_dict = {self.name: {k: individual_paths[k] for k in internals}}
        return ImpulseDict(aggregates, internals_dict, inputs.T) - ssin
    def _impulse_linear(self, ss, inputs, outputs, Js, h=1E-4, twosided=False):
        """Linearized impulse responses: apply this block's Jacobians to the
        input paths. 'Js' (precomputed Jacobians) is accepted but not forwarded
        to _jacobian here — TODO confirm this is intentional."""
        return ImpulseDict(self._jacobian(ss, list(inputs.keys()), outputs, inputs.T, h=h, twosided=twosided).apply(inputs))
    def _jacobian(self, ss, inputs, outputs, T, h=1E-4, twosided=False):
        """Jacobians of aggregated outputs w.r.t. inputs via the fake news
        algorithm; 'h' is the step size for numerical differentiation."""
        ss = self.extract_ss_dict(ss)
        outputs = self.M_outputs.inv @ outputs
        # step 0: preliminary processing of steady state
        exog = self.make_exog_law_of_motion(ss)
        endog = self.make_endog_law_of_motion(ss)
        differentiable_backward_fun, differentiable_hetinputs, differentiable_hetoutputs = self.jac_backward_prelim(ss, h, exog, twosided)
        law_of_motion = CombinedTransition([exog, endog]).forward_shockable(ss['Dbeg'])
        exog_by_output = {k: exog.expectation_shockable(ss[k]) for k in outputs | self.backward}
        # step 1 of fake news algorithm
        # compute curlyY and curlyD (backward iteration) for each input i
        curlyYs, curlyDs = {}, {}
        for i in inputs:
            curlyYs[i], curlyDs[i] = self.backward_fakenews(i, outputs, T, differentiable_backward_fun,
                                                            differentiable_hetinputs, differentiable_hetoutputs,
                                                            law_of_motion, exog_by_output)
        # step 2 of fake news algorithm
        # compute expectation vectors curlyE for each outcome o
        curlyPs = {}
        for o in outputs:
            curlyPs[o] = self.expectation_vectors(ss[o], T-1, law_of_motion)
        # steps 3-4 of fake news algorithm
        # make fake news matrix and Jacobian for each outcome-input pair
        F, J = {}, {}
        for o in outputs:
            for i in inputs:
                if o.upper() not in F:
                    F[o.upper()] = {}
                if o.upper() not in J:
                    J[o.upper()] = {}
                F[o.upper()][i] = HetBlock.build_F(curlyYs[i][o], curlyDs[i], curlyPs[o])
                J[o.upper()][i] = HetBlock.J_from_F(F[o.upper()][i])
        return JacobianDict(J, name=self.name, T=T)
'''Steady-state backward and forward methods'''
    def backward_steady_state(self, ss, tol=1E-8, maxit=5000):
        """Backward iteration to get steady-state policies and other outcomes"""
        ss = ss.copy()
        exog = self.make_exog_law_of_motion(ss)
        old = {}
        for it in range(maxit):
            for k in self.backward:
                # form beginning-of-period expectation 'x_p' of each backward var
                ss[k + '_p'] = exog.expectation(ss[k])
                del ss[k]
            ss.update(self.backward_fun(ss))
            # check every 10 iterations; starts at it==1 because 'old' is still
            # empty on the very first pass
            if it % 10 == 1 and all(utils.optimized_routines.within_tolerance(ss[k], old[k], tol)
                                    for k in self.policy):
                break
            old.update({k: ss[k] for k in self.policy})
        else:
            raise ValueError(f'No convergence of policy functions after {maxit} backward iterations!')
        for k in self.backward:
            del ss[k + '_p']
        return ss
    def forward_steady_state(self, ss, tol=1E-10, maxit=100_000):
        """Forward iteration to get steady-state distribution"""
        exog = self.make_exog_law_of_motion(ss)
        endog = self.make_endog_law_of_motion(ss)
        # optional user-provided seeds for the distribution and Markov stationary dists
        Dbeg_seed = ss.get('Dbeg', None)
        pi_seeds = [ss.get(k + '_seed', None) for k in self.exogenous]
        # first obtain initial distribution D
        if Dbeg_seed is None:
            # stationary distribution of each exogenous
            pis = [exog[i].stationary(pi_seed) for i, pi_seed in enumerate(pi_seeds)]
            # uniform distribution over endogenous
            endog_uniform = [np.full(len(ss[k+'_grid']), 1/len(ss[k+'_grid'])) for k in self.policy]
            # initialize outer product of all these as guess
            Dbeg = utils.multidim.outer(pis + endog_uniform)
        else:
            Dbeg = Dbeg_seed
        # iterate until convergence by tol, or maxit
        D = exog.forward(Dbeg)
        for it in range(maxit):
            Dbeg_new = endog.forward(D)
            D_new = exog.forward(Dbeg_new)
            # only check convergence every 10 iterations for efficiency
            if it % 10 == 0 and utils.optimized_routines.within_tolerance(Dbeg, Dbeg_new, tol):
                break
            Dbeg = Dbeg_new
            D = D_new
        else:
            raise ValueError(f'No convergence after {maxit} forward iterations!')
        # "D" is after the exogenous shock, Dbeg is before it
        return Dbeg, D
'''Nonlinear impulse backward and forward methods'''
    def backward_nonlinear(self, ss, inputs, toreturn):
        """Backward iteration along a transition path: starting from steady-state
        policies at the terminal date, iterate back through time to collect the
        paths of the variables in 'toreturn' and the exogenous law of motion."""
        T = inputs.T
        individual_paths = {k: np.empty((T,) + ss[k].shape) for k in toreturn}
        backdict = ss.copy()
        exog = self.make_exog_law_of_motion(backdict)
        exog_path = []
        for t in reversed(range(T)):
            for k in self.backward:
                backdict[k + '_p'] = exog.expectation(backdict[k])
                del backdict[k]
            # overwrite the shocked inputs with their period-t values
            backdict.update({k: ss[k] + v[t, ...] for k, v in inputs.items()})
            self.update_with_hetinputs(backdict)
            backdict.update(self.backward_fun(backdict))
            self.update_with_hetoutputs(backdict)
            for k in individual_paths:
                individual_paths[k][t, ...] = backdict[k]
            exog = self.make_exog_law_of_motion(backdict)
            exog_path.append(exog)
        # exog_path was appended in reverse time order; flip to chronological
        return individual_paths, exog_path[::-1]
    def forward_nonlinear(self, ss, individual_paths, exog_path, monotonic):
        """Forward iteration along a transition path. Mutates 'individual_paths'
        in place, adding the 'D' and 'Dbeg' distribution paths; returns None."""
        T = len(exog_path)
        Dbeg = ss['Dbeg']
        Dbeg_path = np.empty((T,) + Dbeg.shape)
        Dbeg_path[0, ...] = Dbeg
        D_path = np.empty_like(Dbeg_path)
        for t in range(T):
            endog = self.make_endog_law_of_motion({**ss, **{k: individual_paths[k][t, ...] for k in self.policy}}, monotonic)
            # now step forward in two, first exogenous this period then endogenous
            D_path[t, ...] = exog_path[t].forward(Dbeg)
            if t < T-1:
                Dbeg = endog.forward(D_path[t, ...])
                Dbeg_path[t+1, ...] = Dbeg # make this optional
        individual_paths['D'] = D_path
        individual_paths['Dbeg'] = Dbeg_path
'''Jacobian calculation: four parts of fake news algorithm, plus support methods'''
    def backward_fakenews(self, input_shocked, output_list, T, differentiable_backward_fun,
                          differentiable_hetinput, differentiable_hetoutput,
                          law_of_motion: ForwardShockableTransition, exog: Dict[str, ExpectationShockableTransition]):
        """Part 1 of fake news algorithm: calculate curlyY and curlyD in response to fake news shock"""
        # contemporaneous effect of unit scalar shock to input_shocked
        din_dict = {input_shocked: 1}
        if differentiable_hetinput is not None and input_shocked in differentiable_hetinput.inputs:
            din_dict.update(differentiable_hetinput.diff({input_shocked: 1}))
        # maybe_exog_shock=True only for the contemporaneous step
        curlyV, curlyD, curlyY = self.backward_step_fakenews(din_dict, output_list, differentiable_backward_fun,
                                                             differentiable_hetoutput, law_of_motion, exog, True)
        # infer dimensions from this, initialize empty arrays, and fill in contemporaneous effect
        curlyDs = np.empty((T,) + curlyD.shape)
        curlyYs = {k: np.empty(T) for k in curlyY.keys()}
        curlyDs[0, ...] = curlyD
        for k in curlyY.keys():
            curlyYs[k][0] = curlyY[k]
        # fill in anticipation effects of shock up to horizon T
        for t in range(1, T):
            curlyV, curlyDs[t, ...], curlyY = self.backward_step_fakenews({k+'_p': v for k, v in curlyV.items()},
                                                                          output_list, differentiable_backward_fun,
                                                                          differentiable_hetoutput, law_of_motion, exog)
            for k in curlyY.keys():
                curlyYs[k][t] = curlyY[k]
        return curlyYs, curlyDs
    def expectation_vectors(self, o_ss, T, law_of_motion: Transition):
        """Part 2 of fake news algorithm: calculate expectation vectors curlyE"""
        curlyEs = np.empty((T,) + o_ss.shape)
        # initialize with beginning-of-period expectation of steady-state policy
        curlyEs[0, ...] = utils.misc.demean(law_of_motion[0].expectation(o_ss))
        for t in range(1, T):
            # demean so that curlyEs converge to zero, in theory no effect but better numerically
            curlyEs[t, ...] = utils.misc.demean(law_of_motion.expectation(curlyEs[t-1, ...]))
        return curlyEs
@staticmethod
def build_F(curlyYs, curlyDs, curlyEs):
"""Part 3 of fake news algorithm: build fake news matrix from curlyY, curlyD, curlyE"""
T = curlyDs.shape[0]
Tpost = curlyEs.shape[0] - T + 2
F = np.empty((Tpost + T - 1, T))
F[0, :] = curlyYs
F[1:, :] = curlyEs.reshape((Tpost + T - 2, -1)) @ curlyDs.reshape((T, -1)).T
return F
@staticmethod
def J_from_F(F):
"""Part 4 of fake news algorithm: recursively build Jacobian from fake news matrix"""
J = F.copy()
for t in range(1, J.shape[1]):
J[1:, t] += J[:-1, t - 1]
return J
    def backward_step_fakenews(self, din_dict, output_list, differentiable_backward_fun,
                               differentiable_hetoutput, law_of_motion: ForwardShockableTransition,
                               exog: Dict[str, ExpectationShockableTransition], maybe_exog_shock=False):
        """Support for part 1 of fake news algorithm: single backward step in response to shock.

        Given shocks `din_dict` to the backward function's inputs, returns
          - curlyV: shock to beginning-of-period expectations of backward variables
          - curlyD: shock to the one-period-ahead distribution
          - curlyY: shock to aggregate outputs in `output_list`
        `maybe_exog_shock` enables the extra terms from shocks to exogenous
        Markov matrices (only relevant on the contemporaneous step).
        """
        # steady-state distributions: beginning-of-period and post-exog (law_of_motion
        # is assumed to be [exog stage, endog stage] -- indices 0 and 1)
        Dbeg, D = law_of_motion[0].Dss, law_of_motion[1].Dss
        # shock perturbs outputs
        shocked_outputs = differentiable_backward_fun.diff(din_dict)
        curlyV = {k: law_of_motion[0].expectation(shocked_outputs[k]) for k in self.backward}
        # if there might be a shock to exogenous processes, figure out what it is
        if maybe_exog_shock:
            shocks_to_exog = [din_dict.get(k, None) for k in self.exogenous]
        else:
            shocks_to_exog = None
        # perturbations to exog and to policies affect the distribution tomorrow
        policy_shock = [shocked_outputs[k] for k in self.policy]
        if len(policy_shock) == 1:
            # single policy: forward_shock expects a bare array, not a 1-list
            policy_shock = policy_shock[0]
        curlyD = law_of_motion.forward_shock([shocks_to_exog, policy_shock])
        # and also affect aggregate outcomes today
        if differentiable_hetoutput is not None and (output_list & differentiable_hetoutput.outputs):
            # feed both the shocked backward outputs and the raw input shocks to hetoutputs
            shocked_outputs.update(differentiable_hetoutput.diff({**shocked_outputs, **din_dict}, outputs=differentiable_hetoutput.outputs & output_list))
        # aggregate: inner product of post-exog distribution with pointwise shocks
        curlyY = {k: np.vdot(D, shocked_outputs[k]) for k in output_list}
        # add effects from perturbation to exog on beginning-of-period expectations in curlyV and curlyY
        # NOTE(review): assumes `exog` has entries for both backward variables and
        # every name in output_list -- confirm against the caller that builds it
        if maybe_exog_shock:
            for k in curlyV:
                shock = exog[k].expectation_shock(shocks_to_exog)
                if shock is not None:
                    curlyV[k] += shock
            for k in curlyY:
                shock = exog[k].expectation_shock(shocks_to_exog)
                # maybe could be more efficient since we don't need to calculate pointwise?
                if shock is not None:
                    curlyY[k] += np.vdot(Dbeg, shock)
        return curlyV, curlyD, curlyY
def jac_backward_prelim(self, ss, h, exog, twosided):
"""Support for part 1 of fake news algorithm: preload differentiable functions"""
differentiable_hetinputs = None
if self.hetinputs is not None:
# always use two-sided differentiation for hetinputs
differentiable_hetinputs = self.hetinputs.differentiable(ss, h, True)
differentiable_hetoutputs = None
if self.hetoutputs is not None:
differentiable_hetoutputs = self.hetoutputs.differentiable(ss, h, twosided)
ss = ss.copy()
for k in self.backward:
ss[k + '_p'] = exog.expectation(ss[k])
differentiable_backward_fun = self.backward_fun.differentiable(ss, h, twosided)
return differentiable_backward_fun, differentiable_hetinputs, differentiable_hetoutputs
'''HetInput and HetOutput options and processing'''
    def process_hetinputs_hetoutputs(self, hetinputs: Optional[CombinedExtendedFunction], hetoutputs: Optional[CombinedExtendedFunction], tocopy=True):
        """Return a (copied, unless tocopy=False) version of this block with the given
        hetinputs/hetoutputs installed, recomputing inputs/outputs/internals from the
        originals. Order of the set operations below is semantic (OrderedSets)."""
        if tocopy:
            self = copy.copy(self)
        inputs = self.original_inputs.copy()
        outputs = self.original_outputs.copy()
        internals = self.original_internals.copy()
        if hetoutputs is not None:
            # hetoutput inputs become block inputs, unless produced internally
            # by the backward function or the distribution 'D'
            inputs |= (hetoutputs.inputs - self.backward_fun.outputs - ['D'])
            # aggregated hetoutputs are exposed in uppercase
            outputs |= [o.upper() for o in hetoutputs.outputs]
            self.M_outputs = Bijection({o: o.upper() for o in hetoutputs.outputs}) @ self.original_M_outputs
            internals |= hetoutputs.outputs
        if hetinputs is not None:
            # hetinput inputs become block inputs; what hetinputs produce is
            # no longer demanded from the outside (subtract AFTER adding)
            inputs |= hetinputs.inputs
            inputs -= hetinputs.outputs
            internals |= hetinputs.outputs
        self.inputs = inputs
        self.outputs = outputs
        self.internals = internals
        self.hetinputs = hetinputs
        self.hetoutputs = hetoutputs
        # TODO: fix consequences with a self.M @ if there is remap!
        return self
def add_hetinputs(self, functions):
if self.hetinputs is None:
return self.process_hetinputs_hetoutputs(CombinedExtendedFunction(functions), self.hetoutputs)
else:
return self.process_hetinputs_hetoutputs(self.hetinputs.add(functions), self.hetoutputs)
def remove_hetinputs(self, names):
return self.process_hetinputs_hetoutputs(self.hetinputs.remove(names), self.hetoutputs)
def add_hetoutputs(self, functions):
if self.hetoutputs is None:
return self.process_hetinputs_hetoutputs(self.hetinputs, CombinedExtendedFunction(functions))
else:
return self.process_hetinputs_hetoutputs(self.hetinputs, self.hetoutputs.add(functions))
def remove_hetoutputs(self, names):
return self.process_hetinputs_hetoutputs(self.hetinputs, self.hetoutputs.remove(names))
def update_with_hetinputs(self, d):
if self.hetinputs is not None:
d.update(self.hetinputs(d))
def update_with_hetoutputs(self, d):
if self.hetoutputs is not None:
d.update(self.hetoutputs(d))
'''Additional helper functions'''
def extract_ss_dict(self, ss):
if isinstance(ss, SteadyStateDict):
ssnew = ss.toplevel.copy()
if self.name in ss.internals:
ssnew.update(ss.internals[self.name])
return ssnew
else:
return ss.copy()
def initialize_backward(self, ss):
if not all(k in ss for k in self.backward):
ss.update(self.backward_init(ss))
def make_exog_law_of_motion(self, d:dict):
return CombinedTransition([Markov(d[k], i) for i, k in enumerate(self.exogenous)])
def make_endog_law_of_motion(self, d: dict, monotonic=False):
if len(self.policy) == 1:
return lottery_1d(d[self.policy[0]], d[self.policy[0] + '_grid'], monotonic)
else:
return lottery_2d(d[self.policy[0]], d[self.policy[1]],
d[self.policy[0] + '_grid'], d[self.policy[1] + '_grid'], monotonic) | 22,237 | 44.016194 | 155 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/__init__.py | """Block-construction tools""" | 30 | 30 | 30 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/stages.py | from typing import List, Optional
import numpy as np
import copy
# from sequence_jacobian.blocks.support.het_support import DiscreteChoice
from sequence_jacobian.blocks.support.law_of_motion import DiscreteChoice
from ...utilities.function import ExtendedFunction, CombinedExtendedFunction
from ...utilities.ordered_set import OrderedSet
from ...utilities.misc import make_tuple, logit_choice
from .law_of_motion import (lottery_1d, ShockedPolicyLottery1D,
lottery_2d, ShockedPolicyLottery2D,
Markov)
class Stage:
    """Abstract base for one stage of a within-period backward step.

    Subclasses must set, before calling super().__init__: name, backward_outputs,
    report, inputs; and implement backward_step / backward_step_shock / precompute.
    """
    def backward_step(self, inputs, lawofmotion=False):
        """Run one backward step; if lawofmotion, also return this stage's law of motion."""
        pass
    def backward_step_shock(self, ss, shocks, precomputed=None):
        """Linearized backward step around ss; returns shocked outputs and shocked law of motion."""
        pass
    def precompute(self, ss, ss_lawofmotion=None):
        """Precompute whatever backward_step_shock needs at steady state."""
        pass
    def backward_step_separate(self, inputs, lawofmotion=False, hetoutputs=False):
        """Wrapper around backward_step that also obtains hetoutputs,
        and returns backward, report, and sometimes lom separately"""
        outputs = self.backward_step(inputs, lawofmotion)
        if lawofmotion:
            outputs, lom = outputs
        backward_outputs = {k: outputs[k] for k in self.backward_outputs}
        report = {k: outputs[k] for k in self.original_report}
        if hetoutputs and self.hetoutputs is not None:
            # hetoutputs can consume both this stage's inputs and its outputs
            inputs = {**inputs, **outputs}
            #report.update(self.hetoutputs(all_inputs))
            # for some reason self.hetoutputs returns its inputs too, need to fix that
            report.update({k: v for k, v in self.hetoutputs(inputs).items() if k in self.hetoutputs.outputs})
        if lawofmotion:
            return (backward_outputs, report), lom
        else:
            return backward_outputs, report
    def __init__(self, hetoutputs=None):
        # instance variables of a stage:
        # self.name = ""
        # self.backward_outputs = OrderedSet([])
        # self.report = OrderedSet([])
        # self.inputs = OrderedSet([])
        # remember pristine inputs/report so hetoutputs can be re-processed later
        self.original_inputs = self.inputs.copy()
        self.original_report = self.report.copy()
        if hetoutputs is not None:
            hetoutputs = CombinedExtendedFunction(hetoutputs)
        self.process_hetoutputs(hetoutputs, tocopy=False)
    def process_hetoutputs(self, hetoutputs: Optional[CombinedExtendedFunction], tocopy=True):
        """Install (or clear) hetoutputs, recomputing inputs/report from the originals."""
        if tocopy:
            self = copy.copy(self)
        self.inputs = self.original_inputs.copy()
        self.report = self.original_report.copy()
        if hetoutputs is not None:
            # hetoutput inputs not produced by this stage become stage inputs
            self.inputs |= (hetoutputs.inputs - self.report - self.backward_outputs)
            self.report |= hetoutputs.outputs
        self.hetoutputs = hetoutputs
        return self
    def add_hetoutputs(self, functions):
        """Return a copy of this stage with `functions` added as hetoutputs."""
        if self.hetoutputs is None:
            return self.process_hetoutputs(CombinedExtendedFunction(functions))
        else:
            return self.process_hetoutputs(self.hetoutputs.add(functions))
    def remove_hetoutputs(self, names):
        """Return a copy of this stage with the named hetoutputs removed."""
        return self.process_hetoutputs(self.hetoutputs.remove(names))
    # def return_hetinputs(self, d):
    #     if self.hetinputs is not None:
    #         return self.hetinputs(d)
    #     else:
    #         return {}
class Continuous1D(Stage):
    """Stage that does one-dimensional endogenous continuous choice"""
    def __init__(self, backward, policy, f, name=None, hetoutputs=None):
        # subclass-specific attributes
        self.f = ExtendedFunction(f)
        self.policy = policy
        # attributes needed for any stage
        if name is None:
            name = self.f.name
        self.name = name
        self.backward_outputs = OrderedSet(make_tuple(backward))
        # everything f produces that isn't carried backward is reported
        self.report = self.f.outputs - self.backward_outputs
        self.inputs = self.f.inputs
        super().__init__(hetoutputs)
    def __repr__(self):
        return f"<Stage-Continuous1D '{self.name}' with policy '{self.policy}'>"
    def backward_step(self, inputs, lawofmotion=False):
        outputs = self.f(inputs)
        if not lawofmotion:
            return outputs
        else:
            # TODO: option for monotonic?!
            # law of motion is the lottery implied by the policy on its grid
            return outputs, lottery_1d(outputs[self.policy], inputs[self.policy + '_grid'], monotonic=False)
    def backward_step_shock(self, ss, shocks, precomputed):
        space, i, grid, f = precomputed
        outputs = f.diff(shocks)
        # policy shift da raises weight on the right gridpoint by da/space,
        # i.e. shocks the left-gridpoint weight by -da/space
        dpi = -outputs[self.policy] / space
        return outputs, ShockedPolicyLottery1D(i, dpi, grid)
    def precompute(self, ss, ss_lawofmotion):
        # gridpoint indices and gaps of the steady-state lottery, plus differentiable f
        i = ss_lawofmotion.i.reshape(ss_lawofmotion.shape)
        grid = ss_lawofmotion.grid
        return grid[i + 1] - grid[i], i, grid, self.f.differentiable(ss)
class Continuous2D(Stage):
    """Stage that does two-dimensional endogenous continuous choice"""
    def __init__(self, backward, policy, f, name=None, hetoutputs=None):
        # subclass-specific attributes
        self.f = ExtendedFunction(f)
        self.policy = OrderedSet(policy)
        # attributes needed for any stage
        if name is None:
            name = self.f.name
        self.name = name
        self.backward_outputs = OrderedSet(make_tuple(backward))
        self.report = self.f.outputs - self.backward_outputs
        self.inputs = self.f.inputs
        super().__init__(hetoutputs)
    def __repr__(self):
        return f"<Stage-Continuous2D '{self.name}' with policies {self.policy}>"
    def backward_step(self, inputs, lawofmotion=False):
        outputs = self.f(inputs)
        if not lawofmotion:
            return outputs
        else:
            # TODO: option for monotonic?!
            return outputs, lottery_2d(outputs[self.policy[0]], outputs[self.policy[1]],
                                       inputs[self.policy[0] + '_grid'], inputs[self.policy[1] + '_grid'])
    def backward_step_shock(self, ss, shocks, precomputed):
        space1, space2, i1, i2, grid1, grid2, f = precomputed
        outputs = f.diff(shocks)
        # lottery-weight shocks implied by each policy's shift (see Continuous1D)
        dpi1 = -outputs[self.policy[0]] / space1
        dpi2 = -outputs[self.policy[1]] / space2
        # NOTE(review): ShockedPolicyLottery2D.__matmul__ in law_of_motion.py is
        # currently broken (wrong attributes/arity) -- applying this result will fail
        return outputs, ShockedPolicyLottery2D(i1, dpi1, i2, dpi2, grid1, grid2)
    def precompute(self, ss, ss_lawofmotion):
        # steady-state lottery indices and grid gaps along both policy dimensions
        i1 = ss_lawofmotion.i1.reshape(ss_lawofmotion.shape)
        i2 = ss_lawofmotion.i2.reshape(ss_lawofmotion.shape)
        grid1 = ss_lawofmotion.grid1
        grid2 = ss_lawofmotion.grid2
        return (grid1[i1 + 1] - grid1[i1], grid2[i2 + 1] - grid2[i2],
                i1, i2, grid1, grid2, self.f.differentiable(ss))
class ExogenousMaker:
    """Factory for Exogenous stages: call make_stage with the backward set
    returned by the next stage to obtain the actual Exogenous stage."""
    def __init__(self, markov_name, index, name=None, hetoutputs=None):
        self.markov_name = markov_name
        self.index = index
        # default the stage name to "exog_<markov matrix name>"
        self.name = f"exog_{markov_name}" if name is None else name
        self.hetoutputs = hetoutputs

    def make_stage(self, backward):
        """Build the Exogenous stage carrying the given backward variables."""
        return Exogenous(self.markov_name, self.index, self.name, backward, self.hetoutputs)
class Exogenous(Stage):
    """Stage that applies exogenous Markov process along one dimension"""
    def __init__(self, markov_name, index, name, backward, hetoutputs=None):
        # subclass-specific attributes
        self.markov_name = markov_name
        self.index = index
        # attributes needed for any stage
        self.name = name
        self.backward_outputs = backward
        self.report = OrderedSet([])
        self.inputs = backward | [markov_name]
        super().__init__(hetoutputs)
    def __repr__(self):
        return f"<Stage-Exogenous '{self.name}' with Markov matrix '{self.markov_name}'>"
    def backward_step(self, inputs, lawofmotion=False):
        # take expectations of each backward variable along dimension `index`
        Pi = Markov(inputs[self.markov_name], self.index)
        outputs = {k: Pi @ inputs[k] for k in self.backward_outputs}
        if not lawofmotion:
            return outputs
        else:
            # distribution moves forward with the transpose of the expectation operator
            return outputs, Pi.T
    def backward_step_shock(self, ss, shocks, precomputed=None):
        # product rule: d(Pi @ x) = Pi @ dx + dPi @ x_ss
        Pi = Markov(ss[self.markov_name], self.index)
        outputs = {k: Pi @ shocks[k] for k in self.backward_outputs if k in shocks}
        if self.markov_name in shocks:
            dPi = Markov(shocks[self.markov_name], self.index)
            for k in self.backward_outputs:
                if k in outputs:
                    outputs[k] += dPi @ ss[k]
                else:
                    outputs[k] = dPi @ ss[k]
            return outputs, dPi.T
        else:
            # no shock to the Markov matrix itself, so no shock to the law of motion
            return outputs, None
class LogitChoice(Stage):
    """Stage that does endogenous discrete choice with type 1 extreme value taste shocks"""
    def __init__(self, value, backward, index, taste_shock_scale, f=None, name=None, hetoutputs=None):
        # flow utility function, if present, should return a single output
        if f is not None:
            f = ExtendedFunction(f)
            if not len(f.outputs) == 1:
                raise ValueError(f'Flow utility function {f.name} returning multiple outputs {f.outputs}')
            self.f = f
        else:
            self.f = None
        # other subclass-specific attributes
        self.index = index
        self.value = value
        self.backward = OrderedSet(make_tuple(backward))
        self.taste_shock_scale = taste_shock_scale
        # attributes needed for any stage
        # NOTE(review): if f is None and name is None, self.f.name raises
        # AttributeError -- a name is effectively required without f
        if name is None:
            name = self.f.name
        self.name = name
        self.backward_outputs = self.backward | [value]
        self.report = OrderedSet([])
        self.inputs = self.backward | [value, taste_shock_scale]
        if f is not None:
            self.inputs |= f.inputs
        super().__init__(hetoutputs)
    def __repr__(self):
        return f"<Stage-Discrete '{self.name}'>"
    def backward_step(self, inputs, lawofmotion=False):
        # start with value we're given
        V_next = inputs[self.value]
        # add dimension at beginning to allow for choice, then swap (today's choice determines next stages's state)
        V = V_next[np.newaxis, ...]
        V = np.swapaxes(V, 0, self.index+1)
        # call f if we have it to get flow utility
        if self.f is not None:
            flow_u = self.f(inputs)
            flow_u = next(iter(flow_u.values()))
        else:
            # create phantom state variable, convenient but bit wasteful
            nchoice = V.shape[0]
            flow_u = np.zeros((nchoice,) + V_next.shape)
        V = flow_u + V
        # calculate choice probabilities and expected value
        P, EV = logit_choice(V, inputs[self.taste_shock_scale])
        # make law of motion, use it to take expectations of everything else
        lom = DiscreteChoice(P, self.index)
        # take expectations
        outputs = {k: lom.T @ inputs[k] for k in self.backward}
        outputs[self.value] = EV
        if not lawofmotion:
            return outputs
        else:
            return outputs, lom
    def backward_step_shock(self, ss, shocks, precomputed):
        """See 'discrete choice math' note for background. Note that scale is inverse of 'c' in that note."""
        f, lom = precomputed
        # this part parallel to backward_step, just with derivatives...
        dV_next = shocks[self.value]
        dV = dV_next[np.newaxis, ...]
        dV = np.swapaxes(dV, 0, self.index+1)
        if f is not None:
            dflow_u = f.diff(shocks)
            dflow_u = next(iter(dflow_u.values()))
            dflow_u = np.nan_to_num(dflow_u) # -inf - (-inf) = nan, want zeros
        else:
            dflow_u = np.zeros_like(lom.P)
        dV = dflow_u + dV
        # simply take expectations to get shock to expected value function (envelope result)
        dEV = np.sum(lom.P * dV, axis=0)
        # calculate shocks to choice probabilities (note nifty broadcasting of dEV)
        scale = ss[self.taste_shock_scale]
        dP = lom.P * (dV - dEV) / scale
        dlom = DiscreteChoice(dP, self.index)
        # find shocks to outputs, aggregate everything of interest
        # product rule: dlom applied to steady state + lom applied to shocks
        doutputs = {self.value: dEV}
        for k in self.backward:
            doutputs[k] = dlom.T @ ss[k]
            if k in shocks:
                doutputs[k] += lom.T @ shocks[k]
        return doutputs, dlom
def precompute(self, ss, ss_lawofmotion):
f = self.f.differentiable(ss) if self.f is not None else None
return f, ss_lawofmotion | 12,458 | 35.970326 | 115 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/law_of_motion.py | import numpy as np
from . import het_compiled
from ...utilities.interpolate import interpolate_coord_robust, interpolate_coord
from ...utilities.multidim import batch_multiply_ith_dimension, multiply_ith_dimension
from typing import Optional, Sequence, Any, List, Tuple, Union
import copy
class LawOfMotion:
    """Abstract class representing a matrix that operates on state space.
    Rather than giant Ns*Ns matrix (even if sparse), some other representation
    almost always desirable; such representations are subclasses of this."""
    def __matmul__(self, X):
        """Apply this operator to an array defined on the state space."""
        pass
    @property
    def T(self):
        """Transposed operator (swaps forward application and expectation)."""
        pass
def lottery_1d(a, a_grid, monotonic=False):
    """Build the 1D policy lottery for policy `a` on `a_grid`; a monotonic
    policy permits the faster non-robust interpolation routine."""
    interp = interpolate_coord if monotonic else interpolate_coord_robust
    return PolicyLottery1D(*interp(a_grid, a), a_grid)
class PolicyLottery1D(LawOfMotion):
    """Lottery between adjacent gridpoints implied by a 1D continuous policy:
    gridpoint indices `i` get weight `pi`, `i + 1` gets `1 - pi`."""
    # TODO: always operates on final dimension, make more general!
    def __init__(self, i, pi, grid, forward=True):
        # flatten non-policy dimensions into one because that's what methods accept
        self.i = i.reshape((-1,) + grid.shape)
        self.flatshape = self.i.shape
        self.pi = pi.reshape(self.flatshape)
        # but store original shape so we can convert all outputs to it
        self.shape = i.shape
        self.grid = grid
        # also store shape of the endogenous grid itself
        self.endog_shape = self.shape[-1:]
        self.forward = forward
    @property
    def T(self):
        newself = copy.copy(self)
        newself.forward = not self.forward
        return newself
    def __matmul__(self, X):
        # forward pushes a distribution; transposed form takes expectations
        if self.forward:
            return het_compiled.forward_policy_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
        else:
            return het_compiled.expectation_policy_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
class ShockedPolicyLottery1D(PolicyLottery1D):
    """Derivative counterpart of PolicyLottery1D: here `pi` holds the shock to
    the lottery weights (dpi, see Continuous1D.backward_step_shock), and forward
    application yields the shock to the one-period-ahead distribution."""
    def __matmul__(self, X):
        if self.forward:
            return het_compiled.forward_policy_shock_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
        else:
            raise NotImplementedError
def lottery_2d(a, b, a_grid, b_grid, monotonic=False):
    """Build the 2D policy lottery for policies (a, b) on their respective grids."""
    if monotonic:
        # right now we have no monotonic 2D examples, so this shouldn't be called
        return PolicyLottery2D(*interpolate_coord(a_grid, a),
                               *interpolate_coord(b_grid, b), a_grid, b_grid)
    return PolicyLottery2D(*interpolate_coord_robust(a_grid, a),
                           *interpolate_coord_robust(b_grid, b), a_grid, b_grid)
class PolicyLottery2D(LawOfMotion):
    """Joint lottery over two policy dimensions: weights pi1/pi2 on the lower
    gridpoints i1/i2, with the complementary weights on i1+1/i2+1."""
    def __init__(self, i1, pi1, i2, pi2, grid1, grid2, forward=True):
        # flatten non-policy dimensions into one because that's what methods accept
        self.i1 = i1.reshape((-1,) + grid1.shape + grid2.shape)
        self.flatshape = self.i1.shape
        self.i2 = i2.reshape(self.flatshape)
        self.pi1 = pi1.reshape(self.flatshape)
        self.pi2 = pi2.reshape(self.flatshape)
        # but store original shape so we can convert all outputs to it
        self.shape = i1.shape
        self.grid1 = grid1
        self.grid2 = grid2
        # also store shape of the endogenous grid itself
        self.endog_shape = self.shape[-2:]
        self.forward = forward
    @property
    def T(self):
        newself = copy.copy(self)
        newself.forward = not self.forward
        return newself
    def __matmul__(self, X):
        # forward pushes a distribution; transposed form takes expectations
        if self.forward:
            return het_compiled.forward_policy_2d(X.reshape(self.flatshape), self.i1, self.i2,
                                                  self.pi1, self.pi2).reshape(self.shape)
        else:
            return het_compiled.expectation_policy_2d(X.reshape(self.flatshape), self.i1, self.i2,
                                                      self.pi1, self.pi2).reshape(self.shape)
class ShockedPolicyLottery2D(PolicyLottery2D):
    """Derivative counterpart of PolicyLottery2D, where pi1/pi2 hold lottery-weight shocks."""
    def __matmul__(self, X):
        if self.forward:
            # BUG: this class stores i1/i2/pi1/pi2 (see PolicyLottery2D.__init__),
            # not i/pi, so this call raises AttributeError; moreover
            # het_compiled.forward_policy_shock_2d takes 7 arguments
            # (Dss, x_i, y_i, x_pi_ss, y_pi_ss, x_pi_shock, y_pi_shock). The
            # correct linearization also needs the steady-state lottery weights,
            # which are not stored here -- see ForwardShockablePolicyLottery2D
            # in het_support.py for the working formula.
            return het_compiled.forward_policy_shock_2d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
        else:
            raise NotImplementedError
class Markov(LawOfMotion):
    """Exogenous Markov transition applied along dimension `i` of the state space."""
    def __init__(self, Pi, i):
        self.Pi = Pi
        self.i = i

    @property
    def T(self):
        """Transposed transition (swaps expectation for forward application)."""
        flipped = copy.copy(self)
        flipped.Pi = flipped.Pi.T
        if isinstance(flipped.Pi, np.ndarray):
            # optimizing: copy to get right order in memory
            flipped.Pi = flipped.Pi.copy()
        return flipped

    def __matmul__(self, X):
        return multiply_ith_dimension(self.Pi, self.i, X)
class DiscreteChoice(LawOfMotion):
    """Discrete-choice transition: applies choice probabilities P(d|...s_i...)
    along dimension `i` of the state space (0 marks unavailable choices)."""
    def __init__(self, P, i):
        self.P = P
        self.i = i
        self.forward = True
        # cache "transposed" version of this, since we'll always need both!
        self.P_T = P.swapaxes(0, 1 + self.i).copy()

    @property
    def T(self):
        flipped = copy.copy(self)
        flipped.forward = not self.forward
        return flipped

    def __matmul__(self, X):
        mat = self.P if self.forward else self.P_T
        return batch_multiply_ith_dimension(mat, self.i, X)
| 5,451 | 33.506329 | 119 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/parent.py | from copy import deepcopy
class Parent:
    """Mixin for blocks that contain child blocks (see tests in test_parent_block.py).

    Maintains two lookup tables:
      - kids: maps each immediate child's name to the child block itself
      - descendants: maps every descendant's name (a block's descendants include
        itself) to the name of the immediate kid through which it is reached
        (None for this block's own name, which belongs to no kid)
    """
    def __init__(self, blocks, name=None):
        # only set name if the subclass hasn't already done so
        if name is not None and not hasattr(self, 'name'):
            self.name = name
        children = {}
        reachable = {}
        for child in blocks:
            children[child.name] = child
            if isinstance(child, Parent):
                # absorb the child's whole descendant table, routed through it
                for desc in child.descendants:
                    if desc in reachable:
                        raise ValueError(f'Overlapping block name {desc}')
                    reachable[desc] = child.name
            else:
                reachable[child.name] = child.name
        # this block is its own descendant, but is reached through no kid
        if self.name in reachable:
            raise ValueError(f'Overlapping block name {self.name}')
        reachable[self.name] = None
        self.kids = children
        self.descendants = reachable

    def __getitem__(self, k):
        """Look up any descendant block (including self) by name."""
        if k == self.name:
            return self
        if k in self.kids:
            return self.kids[k]
        # recurse into the kid through which this descendant is reached
        return self.kids[self.descendants[k]][k]

    def select(self, d, kid):
        """Restrict dict d (keyed by block names) to the descendants of `kid`."""
        allowed = self.kids[kid].descendants
        return {name: val for name, val in d.items() if name in allowed}

    def path(self, k, reverse=True):
        """Chain of block names from self down to descendant k (root-first if reverse)."""
        if k not in self.descendants:
            raise KeyError(f'Cannot get path to {k} because it is not a descendant of current block')
        if k == self.name:
            segments = []
        else:
            owner = self.kids[self.descendants[k]]
            segments = owner.path(k, reverse=False) if isinstance(owner, Parent) else [k]
        segments.append(self.name)
        return list(reversed(segments)) if reverse else segments

    def get_attribute(self, k, attr):
        """Gets attribute attr from descendant k, respecting any remapping
        along the way (requires that attr is list, dict, set)"""
        if k == self.name:
            inner = getattr(self, attr)
        else:
            owner = self.kids[self.descendants[k]]
            if isinstance(owner, Parent):
                inner = owner.get_attribute(k, attr)
            else:
                inner = getattr(owner, attr)
            # apply the kid's remapping, if it has one
            if hasattr(owner, 'M'):
                inner = owner.M @ inner
        return self.M @ inner if hasattr(self, 'M') else inner
| 2,775 | 32.047619 | 128 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/het_compiled.py | import numpy as np
from numba import njit
@njit
def forward_policy_1d(D, x_i, x_pi):
    """Push distribution D forward through a 1D policy lottery: mass at (iz, ix)
    moves to gridpoint x_i[iz, ix] with weight x_pi and to x_i + 1 with 1 - x_pi."""
    nZ, nX = D.shape
    Dnew = np.zeros_like(D)
    for iz in range(nZ):
        for ix in range(nX):
            i = x_i[iz, ix]
            pi = x_pi[iz, ix]
            d = D[iz, ix]
            Dnew[iz, i] += d * pi
            Dnew[iz, i+1] += d * (1 - pi)
    return Dnew
@njit
def expectation_policy_1d(X, x_i, x_pi):
    """Adjoint of forward_policy_1d: expected value of X after the 1D policy lottery."""
    nZ, nX = X.shape
    Xnew = np.zeros_like(X)
    for iz in range(nZ):
        for ix in range(nX):
            i = x_i[iz, ix]
            pi = x_pi[iz, ix]
            Xnew[iz, ix] = pi * X[iz, i] + (1-pi) * X[iz, i+1]
    return Xnew
@njit
def forward_policy_shock_1d(Dss, x_i_ss, x_pi_shock):
    """forward_step_1d linearized wrt x_pi: a rise in the left-gridpoint weight
    moves mass from gridpoint x_i + 1 to x_i, scaled by the steady-state distribution."""
    nZ, nX = Dss.shape
    Dshock = np.zeros_like(Dss)
    for iz in range(nZ):
        for ix in range(nX):
            i = x_i_ss[iz, ix]
            dshock = x_pi_shock[iz, ix] * Dss[iz, ix]
            Dshock[iz, i] += dshock
            Dshock[iz, i + 1] -= dshock
    return Dshock
@njit
def forward_policy_2d(D, x_i, y_i, x_pi, y_pi):
    """Push distribution D forward through a 2D policy lottery: mass splits across
    the four corners (x_i, y_i)..(x_i+1, y_i+1) with product weights.
    Note local naming here: beta = x weight, alpha = y weight."""
    nZ, nX, nY = D.shape
    Dnew = np.zeros_like(D)
    for iz in range(nZ):
        for ix in range(nX):
            for iy in range(nY):
                ixp = x_i[iz, ix, iy]
                iyp = y_i[iz, ix, iy]
                beta = x_pi[iz, ix, iy]
                alpha = y_pi[iz, ix, iy]
                Dnew[iz, ixp, iyp] += alpha * beta * D[iz, ix, iy]
                Dnew[iz, ixp+1, iyp] += alpha * (1 - beta) * D[iz, ix, iy]
                Dnew[iz, ixp, iyp+1] += (1 - alpha) * beta * D[iz, ix, iy]
                Dnew[iz, ixp+1, iyp+1] += (1 - alpha) * (1 - beta) * D[iz, ix, iy]
    return Dnew
@njit
def expectation_policy_2d(X, x_i, y_i, x_pi, y_pi):
    """Adjoint of forward_policy_2d: expected value of X after the 2D policy lottery.
    Note: local alpha/beta naming is swapped relative to forward_policy_2d
    (here alpha = x weight, beta = y weight); the math is consistent."""
    nZ, nX, nY = X.shape
    Xnew = np.empty_like(X)
    for iz in range(nZ):
        for ix in range(nX):
            for iy in range(nY):
                ixp = x_i[iz, ix, iy]
                iyp = y_i[iz, ix, iy]
                alpha = x_pi[iz, ix, iy]
                beta = y_pi[iz, ix, iy]
                Xnew[iz, ix, iy] = (alpha * beta * X[iz, ixp, iyp] + alpha * (1-beta) * X[iz, ixp, iyp+1] +
                                    (1-alpha) * beta * X[iz, ixp+1, iyp] +
                                    (1-alpha) * (1-beta) * X[iz, ixp+1, iyp+1])
    return Xnew
@njit
def forward_policy_shock_2d(Dss, x_i_ss, y_i_ss, x_pi_ss, y_pi_ss, x_pi_shock, y_pi_shock):
    """Endogenous update part of forward_step_shock_2d: product-rule derivative
    of forward_policy_2d wrt both lottery weights, evaluated at steady state.
    Here alpha = x_pi_ss and beta = y_pi_ss; dalpha/dbeta already absorb Dss."""
    nZ, nX, nY = Dss.shape
    Dshock = np.zeros_like(Dss)
    for iz in range(nZ):
        for ix in range(nX):
            for iy in range(nY):
                ixp = x_i_ss[iz, ix, iy]
                iyp = y_i_ss[iz, ix, iy]
                alpha = x_pi_ss[iz, ix, iy]
                beta = y_pi_ss[iz, ix, iy]
                dalpha = x_pi_shock[iz, ix, iy] * Dss[iz, ix, iy]
                dbeta = y_pi_shock[iz, ix, iy] * Dss[iz, ix, iy]
                Dshock[iz, ixp, iyp] += dalpha * beta + alpha * dbeta
                Dshock[iz, ixp+1, iyp] += dbeta * (1-alpha) - beta * dalpha
                Dshock[iz, ixp, iyp+1] += dalpha * (1-beta) - alpha * dbeta
                Dshock[iz, ixp+1, iyp+1] -= dalpha * (1-beta) + dbeta * (1-alpha)
    return Dshock
| 3,292 | 30.361905 | 107 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/__init__.py | """Other classes and helpers to aid standard block functionality: .steady_state, .impulse_linear, .impulse_nonlinear,
.jacobian"""
| 131 | 43 | 117 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/het_support.py | import numpy as np
from . import het_compiled
from ...utilities.discretize import stationary as general_stationary
from ...utilities.interpolate import interpolate_coord_robust, interpolate_coord
from ...utilities.multidim import batch_multiply_ith_dimension, multiply_ith_dimension
from ...utilities.misc import logsum
from typing import Optional, Sequence, Any, List, Tuple, Union
class Transition:
    """Abstract class for PolicyLottery or ManyMarkov, i.e. some part of state-space transition"""
    def forward(self, D):
        """Push distribution D forward one step through this transition."""
        pass
    def expectation(self, X):
        """Pull expected values X back one step through this transition."""
        pass
    def forward_shockable(self, Dss):
        """Version of self that can differentiate forward() wrt shocks, given incoming ss distribution."""
        pass
    def expectation_shockable(self, Xss):
        raise NotImplementedError(f'Shockable expectation not implemented for {type(self)}')
class ForwardShockableTransition(Transition):
    """Abstract class extending Transition, allowing us to find effect of shock to transition rule
    on one-period-ahead distribution. This functionality isn't included in the regular Transition
    because it requires knowledge of the incoming ("steady-state") distribution and also sometimes
    some precomputation.

    One crucial thing here is the order of shock arguments in shocks. Also, if None is the default
    argument for a shock, we allow that shock to be None. We always allow shocks in lists to be None."""
    def forward_shock(self, shocks):
        """Shock to the one-period-ahead distribution implied by `shocks` to the transition rule."""
        pass
class ExpectationShockableTransition(Transition):
    """Transition that can differentiate expectation() wrt shocks to the transition rule."""
    def expectation_shock(self, shocks):
        """Shock to beginning-of-period expectations implied by `shocks` to the transition rule."""
        pass
def lottery_1d(a, a_grid, monotonic=False):
    """Build the 1D policy lottery for policy `a` on `a_grid`; a monotonic
    policy permits the faster non-robust interpolation routine."""
    interp = interpolate_coord if monotonic else interpolate_coord_robust
    return PolicyLottery1D(*interp(a_grid, a), a_grid)
class PolicyLottery1D(Transition):
    """Lottery between adjacent gridpoints implied by a 1D continuous policy:
    index `i` gets weight `pi`, index `i + 1` gets `1 - pi`."""
    # TODO: always operates on final dimension, highly non-generic in that sense
    def __init__(self, i, pi, grid):
        # flatten non-policy dimensions into one because that's what methods accept
        self.i = i.reshape((-1,) + grid.shape)
        self.flatshape = self.i.shape
        self.pi = pi.reshape(self.flatshape)
        # but store original shape so we can convert all outputs to it
        self.shape = i.shape
        self.grid = grid
        # also store shape of the endogenous grid itself
        self.endog_shape = self.shape[-1:]
    def forward(self, D):
        return het_compiled.forward_policy_1d(D.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
    def expectation(self, X):
        return het_compiled.expectation_policy_1d(X.reshape(self.flatshape), self.i, self.pi).reshape(self.shape)
    def forward_shockable(self, Dss):
        return ForwardShockablePolicyLottery1D(self.i.reshape(self.shape), self.pi.reshape(self.shape),
                            self.grid, Dss)
class ForwardShockablePolicyLottery1D(PolicyLottery1D, ForwardShockableTransition):
    """PolicyLottery1D that can differentiate the forward step wrt a policy shock."""
    def __init__(self, i, pi, grid, Dss):
        super().__init__(i, pi, grid)
        self.Dss = Dss.reshape(self.flatshape)
        # gap between the bracketing gridpoints at every state
        self.space = grid[self.i+1] - grid[self.i]
    def forward_shock(self, da):
        # raising the policy by da shifts weight off the left gridpoint by da/space
        pi_shock = - da.reshape(self.flatshape) / self.space
        return het_compiled.forward_policy_shock_1d(self.Dss, self.i, pi_shock).reshape(self.shape)
def lottery_2d(a, b, a_grid, b_grid, monotonic=False):
    """Build the 2D policy lottery for policies (a, b) on their respective grids."""
    if monotonic:
        # right now we have no monotonic 2D examples, so this shouldn't be called
        return PolicyLottery2D(*interpolate_coord(a_grid, a),
                               *interpolate_coord(b_grid, b), a_grid, b_grid)
    return PolicyLottery2D(*interpolate_coord_robust(a_grid, a),
                           *interpolate_coord_robust(b_grid, b), a_grid, b_grid)
class PolicyLottery2D(Transition):
    """Joint lottery over two policy dimensions: weights pi1/pi2 on the lower
    gridpoints i1/i2, complementary weights on i1+1/i2+1."""
    def __init__(self, i1, pi1, i2, pi2, grid1, grid2):
        # flatten non-policy dimensions into one because that's what methods accept
        self.i1 = i1.reshape((-1,) + grid1.shape + grid2.shape)
        self.flatshape = self.i1.shape
        self.i2 = i2.reshape(self.flatshape)
        self.pi1 = pi1.reshape(self.flatshape)
        self.pi2 = pi2.reshape(self.flatshape)
        # but store original shape so we can convert all outputs to it
        self.shape = i1.shape
        self.grid1 = grid1
        self.grid2 = grid2
        # also store shape of the endogenous grid itself
        self.endog_shape = self.shape[-2:]
    def forward(self, D):
        return het_compiled.forward_policy_2d(D.reshape(self.flatshape), self.i1, self.i2,
                                              self.pi1, self.pi2).reshape(self.shape)
    def expectation(self, X):
        return het_compiled.expectation_policy_2d(X.reshape(self.flatshape), self.i1, self.i2,
                                                  self.pi1, self.pi2).reshape(self.shape)
    def forward_shockable(self, Dss):
        return ForwardShockablePolicyLottery2D(self.i1.reshape(self.shape), self.pi1.reshape(self.shape),
                                               self.i2.reshape(self.shape), self.pi2.reshape(self.shape),
                                               self.grid1, self.grid2, Dss)
class ForwardShockablePolicyLottery2D(PolicyLottery2D, ForwardShockableTransition):
    """PolicyLottery2D that can differentiate the forward step wrt shocks (da1, da2)
    to both policies."""
    def __init__(self, i1, pi1, i2, pi2, grid1, grid2, Dss):
        super().__init__(i1, pi1, i2, pi2, grid1, grid2)
        self.Dss = Dss.reshape(self.flatshape)
        # gaps between bracketing gridpoints along each policy dimension
        self.space1 = grid1[self.i1+1] - grid1[self.i1]
        self.space2 = grid2[self.i2+1] - grid2[self.i2]
    def forward_shock(self, da):
        da1, da2 = da
        # policy shifts translate into lottery-weight shocks -da/space per dimension
        pi_shock1 = -da1.reshape(self.flatshape) / self.space1
        pi_shock2 = -da2.reshape(self.flatshape) / self.space2
        return het_compiled.forward_policy_shock_2d(self.Dss, self.i1, self.i2, self.pi1, self.pi2,
                                                    pi_shock1, pi_shock2).reshape(self.shape)
class Markov(Transition):
    """Exogenous Markov transition applied along dimension `i` of the state space."""
    def __init__(self, Pi, i):
        self.Pi = Pi
        self.Pi_T = self.Pi.T
        if isinstance(self.Pi_T, np.ndarray):
            # optimization: copy to get right order in memory
            self.Pi_T = self.Pi_T.copy()
        self.i = i
    def forward(self, D):
        # distributions move with the transpose of the expectation operator
        return multiply_ith_dimension(self.Pi_T, self.i, D)
    def expectation(self, X):
        return multiply_ith_dimension(self.Pi, self.i, X)
    def forward_shockable(self, Dss):
        return ForwardShockableMarkov(self.Pi, self.i, Dss)
    def expectation_shockable(self, Xss):
        return ExpectationShockableMarkov(self.Pi, self.i, Xss)
    def stationary(self, pi_seed, tol=1E-11, maxit=10_000):
        """Stationary distribution of Pi (delegates to utilities.discretize.stationary)."""
        return general_stationary(self.Pi, pi_seed, tol, maxit)
class ForwardShockableMarkov(Markov, ForwardShockableTransition):
    """Markov transition that can differentiate the forward step wrt a shock dPi."""
    def __init__(self, Pi, i, Dss):
        super().__init__(Pi, i)
        self.Dss = Dss
    def forward_shock(self, dPi):
        # linear in Pi, so the shock is just dPi' applied to the ss distribution
        return multiply_ith_dimension(dPi.T, self.i, self.Dss)
class ExpectationShockableMarkov(Markov, ExpectationShockableTransition):
    """Markov transition that can differentiate the expectation step wrt a shock dPi."""
    def __init__(self, Pi, i, Xss):
        super().__init__(Pi, i)
        self.Xss = Xss
    def expectation_shock(self, dPi):
        # linear in Pi, so the shock is just dPi applied to the ss values
        return multiply_ith_dimension(dPi, self.i, self.Xss)
class CombinedTransition(Transition):
    """Composition of transitions applied in sequence: forward runs the stages in
    order, expectation runs them in reverse."""
    def __init__(self, stages: Sequence[Transition]):
        self.stages = stages
    def forward(self, D):
        for stage in self.stages:
            D = stage.forward(D)
        return D
    def expectation(self, X):
        for stage in reversed(self.stages):
            X = stage.expectation(X)
        return X
    def forward_shockable(self, Dss):
        # each stage needs the distribution as it arrives at that stage
        shockable_stages = []
        for stage in self.stages:
            shockable_stages.append(stage.forward_shockable(Dss))
            Dss = stage.forward(Dss)
        return ForwardShockableCombinedTransition(shockable_stages)
    def expectation_shockable(self, Xss):
        # each stage needs expectations as they arrive at that stage (walking backward)
        shockable_stages = []
        for stage in reversed(self.stages):
            shockable_stages.append(stage.expectation_shockable(Xss))
            Xss = stage.expectation(Xss)
        return ExpectationShockableCombinedTransition(list(reversed(shockable_stages)))
    def __getitem__(self, i):
        return self.stages[i]
# type aliases for shock arguments: a single shock can be any object a stage's
# *_shock method accepts (e.g. dPi for a Markov stage, or a tuple of policy
# shocks for a multi-dimensional policy stage); stages may also take a
# list/tuple of such shocks
Shock = Any
ListTupleShocks = Union[List[Shock], Tuple[Shock]]
class ForwardShockableCombinedTransition(CombinedTransition, ForwardShockableTransition):
    """Composition of forward-shockable stages; accumulates first-order
    distributional responses stage by stage."""
    def __init__(self, stages: Sequence[ForwardShockableTransition]):
        self.stages = stages
        # steady-state distribution entering the first stage (each shockable
        # stage stores the Dss it receives, so stage 0 holds the overall input)
        self.Dss = stages[0].Dss
    def forward_shock(self, shocks: Optional[Sequence[Optional[Union[Shock, ListTupleShocks]]]]):
        # `shocks` has one entry per stage; None means no shock at that stage
        if shocks is None:
            return None
        # each entry of shocks is either a sequence (list or tuple)
        dD = None
        for stage, shock in zip(self.stages, shocks):
            if shock is not None:
                # this stage's own contribution, evaluated at the steady state
                # entering this stage
                dD_shock = stage.forward_shock(shock)
            else:
                dD_shock = None
            if dD is not None:
                # propagate the previously accumulated response through this
                # stage, then add the stage's own contribution
                dD = stage.forward(dD)
                if shock is not None:
                    dD += dD_shock
            else:
                dD = dD_shock
        return dD
class ExpectationShockableCombinedTransition(CombinedTransition, ExpectationShockableTransition):
    """Composition of expectation-shockable stages; accumulates first-order
    expectation responses walking the stages in reverse."""
    def __init__(self, stages: Sequence[ExpectationShockableTransition]):
        self.stages = stages
        # steady-state expectation target entering the *last* stage, which is
        # the first stage applied when iterating expectations backward
        self.Xss = stages[-1].Xss
    def expectation_shock(self, shocks: Sequence[Optional[Union[Shock, ListTupleShocks]]]):
        # `shocks` has one entry per stage; None means no shock at that stage
        dX = None
        for stage, shock in zip(reversed(self.stages), reversed(shocks)):
            if shock is not None:
                # this stage's own contribution, evaluated at the steady-state
                # value entering it (in backward order)
                dX_shock = stage.expectation_shock(shock)
            else:
                dX_shock = None
            if dX is not None:
                # pull the accumulated response back through this stage, then
                # add the stage's own contribution
                dX = stage.expectation(dX)
                if shock is not None:
                    dX += dX_shock
            else:
                dX = dX_shock
        return dX
| 9,956 | 34.816547 | 113 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/simple_displacement.py | """Displacement handler classes used by SimpleBlock for .ss, .td, and .jac evaluation to have Dynare-like syntax"""
import numpy as np
import numbers
from warnings import warn
from ...utilities.misc import numeric_primitive
def ignore(x):
    """Wrap a scalar or array in the matching Ignore* type, which swallows
    time displacements (calls like x(-1) return x itself)."""
    if isinstance(x, int):
        return IgnoreInt(x)
    # int was handled above, so any remaining Real is a genuine float-like
    if isinstance(x, numbers.Real):
        return IgnoreFloat(x)
    if isinstance(x, np.ndarray):
        return IgnoreVector(x)
    raise TypeError(f"{type(x)} is not supported. Must provide either a float or an nd.array as an argument")
class IgnoreInt(int):
    """This class ignores time displacements of a scalar.
    Standard arithmetic operators including +, -, x, /, ** all overloaded to "promote" the result of
    any arithmetic operation with an Ignore type to an Ignore type. e.g. type(Ignore(1) + 1) is Ignore
    """
    def __repr__(self):
        return f'IgnoreInt({numeric_primitive(self)})'
    @property
    def ss(self):
        # the steady-state value of a constant scalar is itself
        return self
    def __call__(self, index):
        # time displacement x(index) is a no-op for a steady-state scalar
        return self
    def apply(self, f, **kwargs):
        # apply f to the underlying value and re-wrap the result as an Ignore type
        return ignore(f(numeric_primitive(self), **kwargs))
    def __pos__(self):
        return self
    def __neg__(self):
        return ignore(-numeric_primitive(self))
    # Every binary operator below follows the same pattern: if the other operand
    # is a Displace or AccumulatedDerivative, defer to that operand's (reflected)
    # operator so the richer type governs the result; otherwise compute plain
    # arithmetic on the primitive values and re-wrap via ignore().
    def __add__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__radd__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) + other)
    def __radd__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__add__(numeric_primitive(self))
        else:
            return ignore(other + numeric_primitive(self))
    def __sub__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rsub__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) - other)
    def __rsub__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__sub__(numeric_primitive(self))
        else:
            return ignore(other - numeric_primitive(self))
    def __mul__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rmul__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) * other)
    def __rmul__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__mul__(numeric_primitive(self))
        else:
            return ignore(other * numeric_primitive(self))
    def __truediv__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rtruediv__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) / other)
    def __rtruediv__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__truediv__(numeric_primitive(self))
        else:
            return ignore(other / numeric_primitive(self))
    def __pow__(self, power, modulo=None):
        if isinstance(power, Displace) or isinstance(power, AccumulatedDerivative):
            return power.__rpow__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) ** power)
    def __rpow__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__pow__(numeric_primitive(self))
        else:
            return ignore(other ** numeric_primitive(self))
class IgnoreFloat(float):
    """This class ignores time displacements of a scalar.
    Standard arithmetic operators including +, -, x, /, ** all overloaded to "promote" the result of
    any arithmetic operation with an Ignore type to an Ignore type. e.g. type(Ignore(1) + 1) is Ignore
    """
    def __repr__(self):
        return f'IgnoreFloat({numeric_primitive(self)})'
    @property
    def ss(self):
        # the steady-state value of a constant scalar is itself
        return self
    def __call__(self, index):
        # time displacement x(index) is a no-op for a steady-state scalar
        return self
    def apply(self, f, **kwargs):
        # apply f to the underlying value and re-wrap the result as an Ignore type
        return ignore(f(numeric_primitive(self), **kwargs))
    def __pos__(self):
        return self
    def __neg__(self):
        return ignore(-numeric_primitive(self))
    # Same operator pattern as IgnoreInt: defer to Displace/AccumulatedDerivative
    # operands via their (reflected) operators, otherwise do plain arithmetic on
    # the primitive values and re-wrap via ignore().
    def __add__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__radd__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) + other)
    def __radd__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__add__(numeric_primitive(self))
        else:
            return ignore(other + numeric_primitive(self))
    def __sub__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rsub__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) - other)
    def __rsub__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__sub__(numeric_primitive(self))
        else:
            return ignore(other - numeric_primitive(self))
    def __mul__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rmul__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) * other)
    def __rmul__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__mul__(numeric_primitive(self))
        else:
            return ignore(other * numeric_primitive(self))
    def __truediv__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rtruediv__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) / other)
    def __rtruediv__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__truediv__(numeric_primitive(self))
        else:
            return ignore(other / numeric_primitive(self))
    def __pow__(self, power, modulo=None):
        if isinstance(power, Displace) or isinstance(power, AccumulatedDerivative):
            return power.__rpow__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) ** power)
    def __rpow__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__pow__(numeric_primitive(self))
        else:
            return ignore(other ** numeric_primitive(self))
class IgnoreVector(np.ndarray):
    """This class ignores time displacements of a np.ndarray.
       See NumPy documentation on "Subclassing ndarray" for more details on the use of __new__
       for this implementation."""
    def __new__(cls, x):
        # view-cast: shares data with x, only the Python type changes
        obj = np.asarray(x).view(cls)
        return obj
    def __repr__(self):
        return f'IgnoreVector({numeric_primitive(self)})'
    @property
    def ss(self):
        # the steady-state value of a constant vector is itself
        return self
    def __call__(self, index):
        # time displacement x(index) is a no-op for a steady-state vector
        return self
    def apply(self, f, **kwargs):
        # apply f to the underlying array and re-wrap the result as an Ignore type
        return ignore(f(numeric_primitive(self), **kwargs))
    # Same operator pattern as IgnoreInt/IgnoreFloat: defer to
    # Displace/AccumulatedDerivative operands via their (reflected) operators,
    # otherwise do plain arithmetic and re-wrap via ignore().
    def __add__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__radd__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) + other)
    def __radd__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__add__(numeric_primitive(self))
        else:
            return ignore(other + numeric_primitive(self))
    def __sub__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rsub__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) - other)
    def __rsub__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__sub__(numeric_primitive(self))
        else:
            return ignore(other - numeric_primitive(self))
    def __mul__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rmul__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) * other)
    def __rmul__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__mul__(numeric_primitive(self))
        else:
            return ignore(other * numeric_primitive(self))
    def __truediv__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__rtruediv__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) / other)
    def __rtruediv__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__truediv__(numeric_primitive(self))
        else:
            return ignore(other / numeric_primitive(self))
    def __pow__(self, power, modulo=None):
        if isinstance(power, Displace) or isinstance(power, AccumulatedDerivative):
            return power.__rpow__(numeric_primitive(self))
        else:
            return ignore(numeric_primitive(self) ** power)
    def __rpow__(self, other):
        if isinstance(other, Displace) or isinstance(other, AccumulatedDerivative):
            return other.__pow__(numeric_primitive(self))
        else:
            return ignore(other ** numeric_primitive(self))
class Displace(np.ndarray):
    """This class makes time displacements of a time path, given the steady-state value.
    Needed for SimpleBlock.td()"""
    def __new__(cls, x, ss=None, ss_initial=None, name='UNKNOWN'):
        # x: the time path, with time as the first dimension
        # ss: terminal steady state, used to pad leads x(+k)
        # ss_initial: initial steady state, used to pad lags x(-k)
        obj = np.asarray(x).view(cls)
        obj.ss = ss
        obj.ss_initial = ss_initial
        obj.name = name
        return obj
    def __array_finalize__(self, obj):
        # note by Matt: not sure what this does?
        # (NumPy calls this on view-casting/slicing; it propagates the extra
        # attributes so derived arrays keep their steady-state info)
        self.ss = getattr(obj, "ss", None)
        self.ss_initial = getattr(obj, "ss_initial", None)
        self.name = getattr(obj, "name", "UNKNOWN")
    def __repr__(self):
        return f'Displace({numeric_primitive(self)})'
    # TODO: Implemented a very preliminary generalization of Displace to higher-dimensional (>1) ndarrays
    # however the rigorous operator overloading/testing has not been checked for higher dimensions.
    # (Matt: fixed so that it's the first dimension that is time dimension, consistent with everything else)
    def __call__(self, index):
        """Time-displace the path: x(k) shifts the path by k periods, padding
        with the terminal steady state (k > 0) or initial steady state (k < 0)."""
        if index != 0:
            if self.ss is None:
                raise KeyError(f'Trying to call {self.name}({index}), but steady-state {self.name} not given!')
            newx = np.zeros(np.shape(self))
            if index > 0:
                # lead: shift the path forward, pad the tail with terminal ss
                newx[:-index] = numeric_primitive(self)[index:]
                newx[-index:] = self.ss
            else:
                # lag: shift the path backward, pad the head with initial ss
                # NOTE(review): assumes ss_initial was provided — confirm callers
                newx[-index:] = numeric_primitive(self)[:index]
                newx[:-index] = self.ss_initial
            return Displace(newx, self.ss, self.ss_initial)
        else:
            return self
    def apply(self, f, **kwargs):
        # apply f elementwise along the path, and to both steady states so the
        # result remains displaceable
        return Displace(f(numeric_primitive(self), **kwargs), ss=f(self.ss, **kwargs), ss_initial=f(self.ss_initial, **kwargs))
    def __pos__(self):
        return self
    def __neg__(self):
        return Displace(-numeric_primitive(self), ss=-self.ss, ss_initial=-self.ss_initial)
    # Every binary operator below follows the same three-way pattern:
    #   Displace op Displace -> combine paths and both steady states pointwise;
    #   Displace op scalar   -> apply the scalar to path and steady states;
    #   Displace op vector   -> combine paths but keep this object's steady
    #                           states unchanged (with a warning), since a plain
    #                           vector carries no steady-state information.
    def __add__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(self) + numeric_primitive(other),
                            ss=self.ss + other.ss, ss_initial=self.ss_initial + other.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(self) + numeric_primitive(other),
                            ss=self.ss + numeric_primitive(other), ss_initial=self.ss_initial + numeric_primitive(other))
        else:
            # TODO: See if there is a different, systematic way we want to handle this case.
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(self) + numeric_primitive(other),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __radd__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(other) + numeric_primitive(self),
                            ss=other.ss + self.ss, ss_initial=other.ss_initial + self.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(other) + numeric_primitive(self),
                            ss=numeric_primitive(other) + self.ss, ss_initial=numeric_primitive(other) + self.ss_initial)
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(other) + numeric_primitive(self),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __sub__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(self) - numeric_primitive(other),
                            ss=self.ss - other.ss, ss_initial=self.ss_initial - other.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(self) - numeric_primitive(other),
                            ss=self.ss - numeric_primitive(other), ss_initial=self.ss_initial - numeric_primitive(other))
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(self) - numeric_primitive(other),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __rsub__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(other) - numeric_primitive(self),
                            ss=other.ss - self.ss, ss_initial=other.ss_initial - self.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(other) - numeric_primitive(self),
                            ss=numeric_primitive(other) - self.ss, ss_initial=numeric_primitive(other) - self.ss_initial)
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(other) - numeric_primitive(self),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __mul__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(self) * numeric_primitive(other),
                            ss=self.ss * other.ss, ss_initial=self.ss_initial * other.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(self) * numeric_primitive(other),
                            ss=self.ss * numeric_primitive(other), ss_initial=self.ss_initial * numeric_primitive(other))
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(self) * numeric_primitive(other),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __rmul__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(other) * numeric_primitive(self),
                            ss=other.ss * self.ss, ss_initial=other.ss_initial * self.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(other) * numeric_primitive(self),
                            ss=numeric_primitive(other) * self.ss, ss_initial=numeric_primitive(other) * self.ss_initial)
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(other) * numeric_primitive(self),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __truediv__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(self) / numeric_primitive(other),
                            ss=self.ss / other.ss, ss_initial=self.ss_initial / other.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(self) / numeric_primitive(other),
                            ss=self.ss / numeric_primitive(other), ss_initial=self.ss_initial / numeric_primitive(other))
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(self) / numeric_primitive(other),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __rtruediv__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(other) / numeric_primitive(self),
                            ss=other.ss / self.ss, ss_initial=other.ss_initial / self.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(other) / numeric_primitive(self),
                            ss=numeric_primitive(other) / self.ss, ss_initial=numeric_primitive(other) / self.ss_initial)
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(other) / numeric_primitive(self),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __pow__(self, power):
        if isinstance(power, Displace):
            return Displace(numeric_primitive(self) ** numeric_primitive(power),
                            ss=self.ss ** power.ss, ss_initial=self.ss_initial ** power.ss_initial)
        elif np.isscalar(power):
            return Displace(numeric_primitive(self) ** numeric_primitive(power),
                            ss=self.ss ** numeric_primitive(power), ss_initial=self.ss_initial ** numeric_primitive(power))
        else:
            warn("\n" + f"Applying operation to {power}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(self) ** numeric_primitive(power),
                            ss=self.ss, ss_initial=self.ss_initial)
    def __rpow__(self, other):
        if isinstance(other, Displace):
            return Displace(numeric_primitive(other) ** numeric_primitive(self),
                            ss=other.ss ** self.ss, ss_initial=other.ss_initial ** self.ss_initial)
        elif np.isscalar(other):
            return Displace(numeric_primitive(other) ** numeric_primitive(self),
                            ss=numeric_primitive(other) ** self.ss, ss_initial=numeric_primitive(other) ** self.ss_initial)
        else:
            warn("\n" + f"Applying operation to {other}, a vector, and {self}, a Displace." + "\n" +
                 f"The resulting Displace object will retain the steady-state value of the original Displace object.")
            return Displace(numeric_primitive(other) ** numeric_primitive(self),
                            ss=self.ss, ss_initial=self.ss_initial)
class AccumulatedDerivative:
    """A container for accumulated derivative information to help calculate the sequence space Jacobian
    of the outputs of a SimpleBlock with respect to its inputs.
    Uses common (i, m) -> x notation as in SimpleSparse (see its docs for more details) as a sparse representation of
    a Jacobian of outputs Y at any time t with respect to inputs X at any time s.

    Attributes:
    `.elements`: `dict`
      A mapping from tuples, (i, m), to floats, x, where i is the index of the non-zero diagonal
      relative to the main diagonal (0), where m is the number of initial entries missing from the diagonal
      (same conceptually as in SimpleSparse), and x is the value of the accumulated derivatives.
    `.f_value`: `float`
      The function value of the AccumulatedDerivative to be used when applying the chain rule in finding a subsequent
      simple derivative. We can think of a SimpleBlock is a composition of simple functions
      (either time displacements, arithmetic operators, etc.), i.e. f_i(f_{i-1}(...f_2(f_1(y))...)), where
      at each step i as we are accumulating the derivatives through each simple function, if the derivative of any
      f_i requires the chain rule, we will need the function value of the previous f_{i-1} to calculate that derivative.
    `._keys`: `list`
      The keys from the `.elements` attribute for convenience.
    `._fp_values`: `list`
      The values from the `.elements` attribute for convenience. `_fp_values` stands for f prime values, i.e. the actual
      values of the accumulated derivative themselves.
    """
    # NOTE(review): the mutable default dict is shared across instances; it is
    # never mutated in place by the methods below (they copy before updating),
    # so this is safe as written — but fragile if that invariant changes.
    def __init__(self, elements={(0, 0): 1.}, f_value=1.):
        self.elements = elements
        self.f_value = f_value
        self._keys = list(self.elements.keys())
        self._fp_values = np.fromiter(self.elements.values(), dtype=float)
    @property
    def ss(self):
        # steady-state view: just the underlying function value, wrapped
        return ignore(self.f_value)
    def __repr__(self):
        formatted = '{' + ', '.join(f'({i}, {m}): {x:.3f}' for (i, m), x in self.elements.items()) + '}'
        return f'AccumulatedDerivative({formatted})'
    # TODO: Rewrite this comment for clarity once confirmed that the paper's notation will change
    # (i, m)/(j, n) correspond to the Q_(-i, m), Q_(-j, n) operators defined for
    # Proposition 2 of the Sequence Space Jacobian paper.
    # The flipped sign in the code is so that the index 'i' matches the k(i) notation
    # for writing SimpleBlock functions. Thus, it follows the same convention as SimpleSparse.
    # Also because __call__ on a AccumulatedDerivative is a simple shift operator, it will take the form
    # Q_(-i, 0) being applied to Q_(-j, n) (following the notation in the paper)
    # s.t. Q_(-i, 0) Q_(-j, n) = Q(k,l)
    def __call__(self, i):
        # time displacement by i: compose the shift operator with each stored
        # diagonal, using compute_l for the resulting missing-entries index
        keys = [(i + j, compute_l(-i, 0, -j, n)) for j, n in self._keys]
        return AccumulatedDerivative(elements=dict(zip(keys, self._fp_values)), f_value=self.f_value)
    def apply(self, f, h=1e-5, **kwargs):
        # chain rule through f: exact derivative for np.log, otherwise a central
        # finite difference with step h
        if f == np.log:
            return AccumulatedDerivative(elements=dict(zip(self._keys,
                                                           [1 / self.f_value * x for x in self._fp_values])),
                                         f_value=np.log(self.f_value))
        else:
            return AccumulatedDerivative(elements=dict(zip(self._keys, [(f(self.f_value + h, **kwargs) -
                                                                         f(self.f_value - h, **kwargs)) / (2 * h) * x
                                                                        for x in self._fp_values])),
                                         f_value=f(self.f_value, **kwargs))
    def __pos__(self):
        return AccumulatedDerivative(elements=dict(zip(self._keys, +self._fp_values)), f_value=+self.f_value)
    def __neg__(self):
        return AccumulatedDerivative(elements=dict(zip(self._keys, -self._fp_values)), f_value=-self.f_value)
    # Binary operators implement the standard calculus rules (sum, product,
    # quotient, power), tracking both the derivative elements and f_value.
    # Scalar operands shift/scale; AccumulatedDerivative operands merge element
    # dicts (add/sub) or recurse through already-defined operators (mul/div/pow).
    def __add__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, self._fp_values)),
                                         f_value=self.f_value + numeric_primitive(other))
        elif isinstance(other, AccumulatedDerivative):
            elements = self.elements.copy()
            for im, x in other.elements.items():
                if im in elements:
                    elements[im] += x
                    # safeguard to retain sparsity: disregard extremely small elements (num error)
                    if abs(elements[im]) < 1E-14:
                        del elements[im]
                else:
                    elements[im] = x
            return AccumulatedDerivative(elements=elements, f_value=self.f_value + other.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __radd__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, self._fp_values)),
                                         f_value=numeric_primitive(other) + self.f_value)
        elif isinstance(other, AccumulatedDerivative):
            elements = other.elements.copy()
            for im, x in self.elements.items():
                if im in elements:
                    elements[im] += x
                    # safeguard to retain sparsity: disregard extremely small elements (num error)
                    if abs(elements[im]) < 1E-14:
                        del elements[im]
                else:
                    elements[im] = x
            return AccumulatedDerivative(elements=elements, f_value=other.f_value + self.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __sub__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, self._fp_values)),
                                         f_value=self.f_value - numeric_primitive(other))
        elif isinstance(other, AccumulatedDerivative):
            elements = self.elements.copy()
            for im, x in other.elements.items():
                if im in elements:
                    elements[im] -= x
                    # safeguard to retain sparsity: disregard extremely small elements (num error)
                    if abs(elements[im]) < 1E-14:
                        del elements[im]
                else:
                    elements[im] = -x
            return AccumulatedDerivative(elements=elements, f_value=self.f_value - other.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __rsub__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, -self._fp_values)),
                                         f_value=numeric_primitive(other) - self.f_value)
        elif isinstance(other, AccumulatedDerivative):
            elements = other.elements.copy()
            for im, x in self.elements.items():
                if im in elements:
                    elements[im] -= x
                    # safeguard to retain sparsity: disregard extremely small elements (num error)
                    if abs(elements[im]) < 1E-14:
                        del elements[im]
                else:
                    elements[im] = -x
            return AccumulatedDerivative(elements=elements, f_value=other.f_value - self.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __mul__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, self._fp_values * numeric_primitive(other))),
                                         f_value=self.f_value * numeric_primitive(other))
        elif isinstance(other, AccumulatedDerivative):
            # product rule: (fg)' = f'g + fg'
            return AccumulatedDerivative(elements=(self * other.f_value + other * self.f_value).elements,
                                         f_value=self.f_value * other.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __rmul__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, numeric_primitive(other) * self._fp_values)),
                                         f_value=numeric_primitive(other) * self.f_value)
        elif isinstance(other, AccumulatedDerivative):
            return AccumulatedDerivative(elements=(other * self.f_value + self * other.f_value).elements,
                                         f_value=other.f_value * self.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __truediv__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, self._fp_values / numeric_primitive(other))),
                                         f_value=self.f_value / numeric_primitive(other))
        elif isinstance(other, AccumulatedDerivative):
            # quotient rule: (f/g)' = (g f' - f g') / g^2
            return AccumulatedDerivative(elements=((other.f_value * self - self.f_value * other) /
                                                   (other.f_value ** 2)).elements,
                                         f_value=self.f_value / other.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __rtruediv__(self, other):
        if np.isscalar(other):
            return AccumulatedDerivative(elements=dict(zip(self._keys, -numeric_primitive(other) /
                                                           self.f_value ** 2 * self._fp_values)),
                                         f_value=numeric_primitive(other) / self.f_value)
        elif isinstance(other, AccumulatedDerivative):
            return AccumulatedDerivative(elements=((self.f_value * other - other.f_value * self) /
                                                   (self.f_value ** 2)).elements, f_value=other.f_value / self.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __pow__(self, power, modulo=None):
        if np.isscalar(power):
            # power rule: (f^p)' = p f^(p-1) f'
            return AccumulatedDerivative(elements=dict(zip(self._keys, numeric_primitive(power) * self.f_value
                                                           ** numeric_primitive(power - 1) * self._fp_values)),
                                         f_value=self.f_value ** numeric_primitive(power))
        elif isinstance(power, AccumulatedDerivative):
            # general rule: (f^g)' = f^(g-1) (g f' + g' f ln f)
            return AccumulatedDerivative(elements=(self.f_value ** (power.f_value - 1) * (
                    power.f_value * self + power * self.f_value * np.log(self.f_value))).elements,
                                         f_value=self.f_value ** power.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
    def __rpow__(self, other):
        if np.isscalar(other):
            # exponential rule: (a^f)' = ln(a) a^f f'
            return AccumulatedDerivative(elements=dict(zip(self._keys, np.log(other) * numeric_primitive(other) **
                                                           self.f_value * self._fp_values)),
                                         f_value=numeric_primitive(other) ** self.f_value)
        elif isinstance(other, AccumulatedDerivative):
            return AccumulatedDerivative(elements=(other.f_value ** (self.f_value - 1) * (
                    self.f_value * other + self * other.f_value * np.log(other.f_value))).elements,
                                         f_value=other.f_value ** self.f_value)
        else:
            raise NotImplementedError("This operation is not yet supported for non-scalar arguments")
def compute_l(i, m, j, n):
    """Computes the `l` index from the composition of shift operators,
    Q_{i, m} Q_{j, n} = Q_{k, l} in Proposition 2 of the paper (regarding
    efficient multiplication of simple Jacobians)."""
    if i >= 0:
        # non-negative i: the result depends only on the sign of j
        return max(m - j, n) if j >= 0 else max(m, n) + min(i, -j)
    if j >= 0:
        # i < 0 here; the net displacement i + j decides the formula
        return max(m - i - j, n) if i + j >= 0 else max(n + i + j, m)
    # both displacements negative
    return max(m, n + i)
# TODO: This needs its own unit test
def vectorize_func_over_time(func, *args):
    """Evaluate `func` along the time dimension.

    Displace arguments carry a time path in their first dimension; every other
    argument (Ignore/IgnoreVector) is treated as a constant. Returns an array
    stacking func's value at each date t.
    """
    displace_inds = [pos for pos, a in enumerate(args) if isinstance(a, Displace)]
    # T is the length of the first Displace argument's time dimension; all
    # Displace args are assumed conformable along that dimension
    T = np.shape(args[displace_inds[0]])[0]
    x_path = [func(*(a[t] if isinstance(a, Displace) else a for a in args))
              for t in range(T)]
    return np.array(x_path)
def apply_function(func, *args, **kwargs):
    """Ensure that a generic function called within a block and acting on a
    Displace object properly instantiates the steady state(s) of the created
    Displace object.

    Fixes two inconsistencies with the non-Displace branch and with
    Displace.apply: (1) `kwargs` were silently dropped when any argument was a
    Displace; (2) `ss_initial` was never propagated, even though Displace.apply
    propagates it, leaving negative time displacements of the result undefined.
    """
    if np.any([isinstance(x, Displace) for x in args]):
        # bind kwargs once so they are applied at every date (previously they
        # were dropped along this branch)
        func_k = (lambda *a: func(*a, **kwargs)) if kwargs else func
        x_path = vectorize_func_over_time(func_k, *args)
        ss = func_k(*[x.ss if isinstance(x, Displace) else numeric_primitive(x) for x in args])
        # mirror Displace.apply: propagate ss_initial when every Displace
        # argument carries one; otherwise keep the old behavior (None)
        if all(x.ss_initial is not None for x in args if isinstance(x, Displace)):
            ss_initial = func_k(*[x.ss_initial if isinstance(x, Displace) else numeric_primitive(x) for x in args])
        else:
            ss_initial = None
        return Displace(x_path, ss=ss, ss_initial=ss_initial)
    elif np.any([isinstance(x, AccumulatedDerivative) for x in args]):
        raise NotImplementedError(
            "Have not yet implemented general apply_function functionality for AccumulatedDerivatives")
    else:
        return func(*args, **kwargs)
| 35,010 | 48.380818 | 127 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/support/steady_state.py | """Various lower-level functions to support the computation of steady states"""
import warnings
import numpy as np
import scipy.optimize as opt
from numbers import Real
from functools import partial
from ...utilities import misc, solvers
def instantiate_steady_state_mutable_kwargs(dissolve, block_kwargs, solver_kwargs, constrained_kwargs):
    """Replace `None` placeholders with fresh empty containers for the mutable
    keyword arguments of the steady_state function (avoids the shared
    mutable-default pitfall)."""
    return (dissolve if dissolve is not None else [],
            block_kwargs if block_kwargs is not None else {},
            solver_kwargs if solver_kwargs is not None else {},
            constrained_kwargs if constrained_kwargs is not None else {})
def provide_solver_default(unknowns):
    """Pick a default numerical solver from the shape of `unknowns`:
    one unknown supplied as a (lower, upper) bounds tuple -> "brentq";
    several unknowns supplied as scalar initial values -> "broyden_custom".
    Raises ValueError for empty or malformed `unknowns`."""
    n_unknowns = len(unknowns)
    if n_unknowns == 0:
        raise ValueError("`unknowns` is empty! Please provide a dict of keys/values equal to the number of unknowns"
                         " that need to be solved for.")
    if n_unknowns == 1:
        bounds = next(iter(unknowns.values()))
        # one-dimensional case requires a well-ordered bounds tuple
        if isinstance(bounds, tuple) and not bounds[0] > bounds[1]:
            return "brentq"
        raise ValueError("Unable to find a compatible one-dimensional solver with provided `unknowns`.\n"
                         " Please provide valid lower/upper bounds, e.g. unknowns = {`a`: (0, 1)}")
    # multi-dimensional case requires scalar initial values for every unknown
    if all(isinstance(v, Real) for v in unknowns.values()):
        return "broyden_custom"
    raise ValueError("Unable to find a compatible multi-dimensional solver with provided `unknowns`.\n"
                     " Please provide valid initial values, e.g. unknowns = {`a`: 1, `b`: 2}")
def run_consistency_check(cresid, ctol=1e-9, fragile=False):
    """Check that the consistency residual `cresid` is within tolerance `ctol`;
    raise RuntimeError if `fragile`, otherwise issue a warning."""
    if not cresid > ctol:
        return
    message = (f"The target values evaluated for the proposed set of unknowns produce a "
               f"maximum residual value of {cresid}, which is greater than the ctol {ctol}.\n"
               f" If used, check if HelperBlocks are indeed compatible with the DAG.\n"
               f" If this is not an issue, adjust ctol accordingly.")
    if fragile:
        raise RuntimeError(message)
    else:
        warnings.warn(message)
# Allow targets to be specified in the following formats
# 1) target = {"asset_mkt": 0} or ["asset_mkt"] (the standard case, where the target = 0)
# 2) target = {"r": 0.01} (allowing for the target to be non-zero)
# 3) target = {"K": "A"} (allowing the target to be another variable in potential_args)
def compute_target_values(targets, potential_args):
    """
    For a given set of target specifications and potential arguments available, compute the targets.
    Called as the return value for the residual function when utilizing the numerical solver.

    targets: Refer to `steady_state` function docstring. May be a dict whose values are
        numbers (desired target levels) or strings (names of other variables in
        `potential_args` that the target should equal), or a list (targets implicitly 0).
    potential_args: Refer to the `steady_state` function docstring for the "calibration" variable
    return: A `float` (if computing a univariate target) or an `np.ndarray` (if using a multivariate target)
    """
    target_values = np.empty(len(targets))
    for (i, t) in enumerate(targets):
        # list-style targets are implicitly "drive to zero"
        v = targets[t] if isinstance(targets, dict) else 0
        # isinstance instead of `type(v) == str`: idiomatic, and also accepts str subclasses
        if isinstance(v, str):
            # target expressed as another variable's name: drive the difference to zero
            target_values[i] = potential_args[t] - potential_args[v]
        else:
            target_values[i] = potential_args[t] - v
    # Univariate solvers require float return values (and not lists)
    if len(targets) == 1:
        return target_values[0]
    else:
        return target_values
def compare_steady_states(ss_ref, ss_comp, tol=1e-8, name_map=None, internal=True, check_same_keys=True, verbose=False):
"""Check if two steady state dicts (can be flat dicts or SteadyStateDict objects) are the same up to a tolerance"""
if name_map is None:
name_map = {}
valid = True
# Compare the steady state values present in both ss_ref and ss_comp
if internal:
if not hasattr(ss_ref, "internal") or not hasattr(ss_comp, "internal"):
warnings.warn("The provided steady state dicts do not both have .internal attrs. Will only compare"
" top-level values")
ds_to_check = [(ss_ref, ss_comp, "toplevel")]
else:
ds_to_check = [(ss_ref, ss_comp, "toplevel")] + [(ss_ref.internal[i], ss_comp.internal[i], i + "_internal") for i in ss_ref.internal]
else:
ds_to_check = [(ss_ref, ss_comp, "toplevel")]
for ds in ds_to_check:
d_ref, d_comp, level = ds
for key_ref in d_ref.keys():
if key_ref in d_comp.keys():
key_comp = key_ref
elif key_ref in name_map:
key_comp = name_map[key_ref]
else:
continue
if np.isscalar(d_ref[key_ref]):
resid = abs(d_ref[key_ref] - d_comp[key_comp])
else:
resid = np.linalg.norm(d_ref[key_ref].ravel() - d_comp[key_comp].ravel(), np.inf)
if verbose:
print(f"{key_ref} resid: {resid}")
else:
if not np.all(np.isclose(resid, 0., atol=tol)):
valid = False
# Show the steady state values present in only one of d_ref or d_comp, i.e. if there are missing keys
if check_same_keys:
d_ref_incl_mapped = set(d_ref.keys()) - set(name_map.keys())
d_comp_incl_mapped = set(d_comp.keys()) - set(name_map.values())
diff_keys = d_ref_incl_mapped.symmetric_difference(d_comp_incl_mapped)
if diff_keys:
if verbose:
print(f"At level '{level}', the keys present only one of the two steady state dicts are {diff_keys}")
valid = False
return valid
def solve_for_unknowns(residual, unknowns, solver, solver_kwargs, residual_kwargs=None,
                       constrained_method="linear_continuation", constrained_kwargs=None,
                       tol=2e-12, verbose=False):
    """Given a residual function (constructed within steady_state) and a set of bounds or initial values for
    the set of unknowns, solve for the root.

    residual: `function`
        A function to be supplied to a numerical solver that takes unknown values as arguments
        and returns computed targets.
    unknowns: `dict`
        Refer to the `steady_state` function docstring for the "unknowns" variable
    solver: `str`
        Refer to the `steady_state` function docstring for the "solver" variable
    solver_kwargs:
        Refer to the `steady_state` function docstring for the "solver_kwargs" variable
    residual_kwargs: `dict`, optional
        Keyword arguments bound into `residual` before it is handed to the solver
    constrained_method: `str`
        How bounds on the unknowns are enforced (see `constrained_multivariate_residual`)
    constrained_kwargs: `dict`, optional
        Keyword arguments forwarded to the constrained-residual constructor
    tol: `float`
        The absolute convergence tolerance of the computed target to the desired target value in the numerical solver
    verbose: `bool`
        Whether the custom/constrained solvers should print progress information
    return: `dict` mapping the names of the unknowns to the root[s] of the residual function
    """
    if residual_kwargs is None:
        residual_kwargs = {}
    if constrained_kwargs is None:
        # bug fix: the None default previously flowed into `**constrained_kwargs` below,
        # raising a TypeError whenever bounded unknowns were used without explicitly
        # passing constrained_kwargs
        constrained_kwargs = {}
    scipy_optimize_uni_solvers = ["bisect", "brentq", "brenth", "ridder", "toms748", "newton", "secant", "halley"]
    scipy_optimize_multi_solvers = ["hybr", "lm", "broyden1", "broyden2", "anderson", "linearmixing", "diagbroyden",
                                    "excitingmixing", "krylov", "df-sane"]
    # Wrap kwargs into the residual function
    residual_f = partial(residual, **residual_kwargs)
    if solver is None:
        raise RuntimeError("Must provide a numerical solver from the following set: brentq, broyden, solved")
    elif solver in scipy_optimize_uni_solvers:
        initial_values_or_bounds = extract_univariate_initial_values_or_bounds(unknowns)
        result = opt.root_scalar(residual_f, method=solver, xtol=tol,
                                 **initial_values_or_bounds, **solver_kwargs)
        if not result.converged:
            raise ValueError(f"Steady-state solver, {solver}, did not converge.")
        unknown_solutions = result.root
    elif solver in scipy_optimize_multi_solvers:
        initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
        # If no bounds were provided
        if not bounds:
            result = opt.root(residual_f, initial_values,
                              method=solver, tol=tol, **solver_kwargs)
        else:
            constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
                                                                    method=constrained_method,
                                                                    **constrained_kwargs)
            result = opt.root(constrained_residual, initial_values,
                              method=solver, tol=tol, **solver_kwargs)
        if not result.success:
            raise ValueError(f"Steady-state solver, {solver}, did not converge."
                             f" The termination status is {result.status}.")
        unknown_solutions = list(result.x)
    # TODO: Implement a more general interface for custom solvers, so we don't need to add new elifs at this level
    #  everytime a new custom solver is implemented.
    elif solver == "broyden_custom":
        initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
        # If no bounds were provided
        if not bounds:
            unknown_solutions, _ = solvers.broyden_solver(residual_f, initial_values,
                                                          tol=tol, verbose=verbose, **solver_kwargs)
        else:
            constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
                                                                    method=constrained_method,
                                                                    **constrained_kwargs)
            unknown_solutions, _ = solvers.broyden_solver(constrained_residual, initial_values,
                                                          verbose=verbose, tol=tol, **solver_kwargs)
        unknown_solutions = list(unknown_solutions)
    elif solver == "newton_custom":
        initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
        # If no bounds were provided
        if not bounds:
            unknown_solutions, _ = solvers.newton_solver(residual_f, initial_values,
                                                         tol=tol, verbose=verbose, **solver_kwargs)
        else:
            constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
                                                                    method=constrained_method,
                                                                    **constrained_kwargs)
            unknown_solutions, _ = solvers.newton_solver(constrained_residual, initial_values,
                                                         tol=tol, verbose=verbose, **solver_kwargs)
        unknown_solutions = list(unknown_solutions)
    elif solver == "solved":
        # If the model either doesn't require a numerical solution or is being evaluated at a candidate solution
        # simply call residual_f once to populate the `ss_values` dict
        residual_f(unknowns.values())
        unknown_solutions = unknowns.values()
    else:
        raise RuntimeError(f"steady_state is not yet compatible with {solver}.")
    return dict(misc.smart_zip(unknowns.keys(), unknown_solutions))
def extract_univariate_initial_values_or_bounds(unknowns):
    """Translate the single entry of `unknowns` into scipy.optimize.root_scalar kwargs:
    a scalar becomes an initial guess `x0`, a pair becomes a `bracket`."""
    value = next(iter(unknowns.values()))
    return {"x0": value} if np.isscalar(value) else {"bracket": (value[0], value[1])}
def extract_multivariate_initial_values_and_bounds(unknowns, fragile=False):
    """Provided a dict mapping names of unknowns to initial values/bounds, return separate dicts of
    the initial values and bounds.
    Note: For one-sided bounds, simply put np.inf/-np.inf as the other side of the bounds, so there is
    no ambiguity about which is the unconstrained side.

    unknowns: dict mapping name -> scalar guess, (lower, upper) pair, or (lower, guess, upper) triple
    fragile: if True, reject (lower, upper) pairs instead of averaging them into an initial guess
    return: (np.ndarray of initial values, dict mapping name -> (lower, upper) for bounded unknowns)
    """
    initial_values = []
    multi_bounds = {}
    for k, v in unknowns.items():
        if np.isscalar(v):
            initial_values.append(v)
        elif len(v) == 2:
            if fragile:
                # bug fix: this branch previously raised the generic "invalid size" message,
                # which self-contradictorily listed length-2 tuples as valid
                raise ValueError(f"The length 2 tuple provided for unknown `{k}` is ambiguous when"
                                 f" fragile=True, since it provides lower/upper bounds but no initial value."
                                 f" Please provide either a scalar initial value or a length 3 tuple,"
                                 f" pertaining to a lower bound, initial value, and upper bound.")
            else:
                warnings.warn("Interpreting values of `unknowns` from length 2 tuple as lower and upper bounds"
                              " and averaging them to get a scalar initial value to provide to the solver.")
                initial_values.append((v[0] + v[1])/2)
        elif len(v) == 3:
            lb, iv, ub = v
            # validate ordering with a real exception (a bare assert disappears under `python -O`)
            if not lb < iv < ub:
                raise ValueError(f"Invalid value {v} for unknown `{k}`: expected"
                                 f" lower bound < initial value < upper bound.")
            initial_values.append(iv)
            multi_bounds[k] = (lb, ub)
        else:
            raise ValueError(f"{len(v)} is an invalid size for the value of an unknown."
                             f" the values of `unknowns` must either be a scalar, pertaining to a"
                             f" single initial value for the root solver to begin from,"
                             f" a length 2 tuple, pertaining to a lower bound and an upper bound,"
                             f" or a length 3 tuple, pertaining to a lower bound, initial value, and upper bound.")
    return np.asarray(initial_values), multi_bounds
def residual_with_linear_continuation(residual, bounds, eval_at_boundary=False,
                                      boundary_epsilon=1e-4, penalty_scale=1e1,
                                      verbose=False):
    """Modify a residual function to implement bounds by an additive penalty for exceeding the boundaries
    provided, scaled by the amount the guess exceeds the boundary.
    e.g. For residual function f(x), desiring x in (0, 1) (so assuming eval_at_boundary = False)
    If the guess for x is 1.1 then we will censor to x_censored = 1 - boundary_epsilon, and return
    f(x_censored) + penalty (where the penalty does not require re-evaluating f() which may be costly)
    residual: `function`
        The function whose roots we want to solve for
    bounds: `dict`
        A dict mapping the names of the unknowns (`str`) to length two tuples corresponding to the lower and upper
        bounds.
    eval_at_boundary: `bool`
        Whether to allow the residual function to be evaluated at exactly the boundary values or not.
        Think of it as whether the solver will treat the bounds as creating a closed or open set for the search space.
    boundary_epsilon: `float`
        The amount to adjust the proposed guess, x, by to calculate the censored value of the residual function,
        when the proposed guess exceeds the boundaries.
    penalty_scale: `float`
        The linear scaling factor for adjusting the penalty for the proposed unknown values exceeding the boundary.
    verbose: `bool`
        Whether to print out additional information for how the constrained residual function is behaving during
        optimization. Useful for tuning the solver.
    """
    # NOTE(review): assumes the iteration order of `bounds` lines up with the ordering of
    # entries in x — the caller builds both from the same `unknowns` dict; confirm if reused elsewhere
    lbs = np.asarray([v[0] for v in bounds.values()])
    ubs = np.asarray([v[1] for v in bounds.values()])
    def constr_residual(x, residual_cache=[]):
        """Implements a constrained residual function, where any attempts to evaluate x outside of the
        bounds provided will result in a linear penalty function scaled by `penalty_scale`.
        Note: We are purposefully using residual_cache as a mutable default argument to cache the most recent
        valid evaluation (maintain state between function calls) of the residual function to induce solvers
        to backstep if they encounter a region of the search space that returns nan values.
        See Hitchhiker's Guide to Python post on Mutable Default Arguments: "When the Gotcha Isn't a Gotcha"
        """
        # Clamp the proposal into the feasible box (strictly inside it unless eval_at_boundary)
        if eval_at_boundary:
            x_censored = np.where(x < lbs, lbs, x)
            x_censored = np.where(x > ubs, ubs, x_censored)
        else:
            x_censored = np.where(x < lbs, lbs + boundary_epsilon, x)
            x_censored = np.where(x > ubs, ubs - boundary_epsilon, x_censored)
        residual_censored = residual(x_censored)
        if verbose:
            print(f"Attempted x is {x}")
            print(f"Censored x is {x_censored}")
            print(f"The residual_censored is {residual_censored}")
        if np.any(np.isnan(residual_censored)):
            # Provide a scaled penalty to the solver when trying to evaluate residual() in an undefined region
            # NOTE(review): if the *first* evaluation already returns nan, residual_cache is
            # empty here and this indexing raises IndexError — confirm callers always start
            # from a point where the residual is well-defined
            residual_censored = residual_cache[0] * penalty_scale
            if verbose:
                print(f"The new residual_censored is {residual_censored}")
        else:
            if not residual_cache:
                residual_cache.append(residual_censored)
            else:
                residual_cache[0] = residual_censored
            if verbose:
                print(f"The residual_cache is {residual_cache[0]}")
        # Provide an additive, scaled penalty to the solver when trying to evaluate residual() outside of the boundary
        residual_with_boundary_penalty = residual_censored + \
                                         (x - x_censored) * penalty_scale * residual_censored
        return residual_with_boundary_penalty
    return constr_residual
def constrained_multivariate_residual(residual, bounds, method="linear_continuation", verbose=False,
                                      **constrained_kwargs):
    """Return a constrained version of the residual function, which accounts for bounds, using the specified method.
    See the docstring of the specific method of interest for further details."""
    # TODO: Implement logistic transform as another option for constrained multivariate residual
    if method != "linear_continuation":
        raise ValueError(f"Method {method} for constrained multivariate root-finding has not yet been implemented.")
    return residual_with_linear_continuation(residual, bounds, verbose=verbose, **constrained_kwargs)
| 18,934 | 50.734973 | 145 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/auxiliary_blocks/jacobiandict_block.py | """A simple wrapper for JacobianDicts to be embedded in DAGs"""
from ..block import Block
from ...classes import ImpulseDict, JacobianDict
class JacobianDictBlock(JacobianDict, Block):
    """A wrapper for nested dicts/JacobianDicts passed directly into DAGs to ensure method compatibility"""
    def __init__(self, nesteddict, outputs=None, inputs=None, name=None):
        super().__init__(nesteddict, outputs=outputs, inputs=inputs, name=name)
        Block.__init__(self)

    def __repr__(self):
        return f"<JacobianDictBlock outputs={self.outputs}, inputs={self.inputs}>"

    def _impulse_linear(self, ss, inputs, outputs, Js):
        # a linear impulse response is just the Jacobian applied to the shock paths
        return ImpulseDict(self.jacobian(ss, list(inputs.keys()), outputs, inputs.T, Js).apply(inputs))

    def _jacobian(self, ss, inputs, outputs, T):
        # Validate the request before subscripting, reporting exactly which names are unknown.
        # (bug fix: the messages previously claimed the offending names *were* among the
        # block's inputs/outputs — the opposite of what the failed subset check establishes)
        if not inputs <= self.inputs:
            raise KeyError(f'Asking JacobianDictBlock for {inputs - self.inputs}, which are not among its inputs {self.inputs}')
        if not outputs <= self.outputs:
            raise KeyError(f'Asking JacobianDictBlock for {outputs - self.outputs}, which are not among its outputs {self.outputs}')
        return self[outputs, inputs]
| 1,166 | 47.625 | 128 | py |
sequence-jacobian | sequence-jacobian-master/src/sequence_jacobian/blocks/auxiliary_blocks/__init__.py | """Auxiliary Block types for building a coherent backend for Block handling"""
| 79 | 39 | 78 | py |
sequence-jacobian | sequence-jacobian-master/tests/conftest.py | """Fixtures used by tests."""
import pytest
from sequence_jacobian.examples import rbc, krusell_smith, hank, two_asset
@pytest.fixture(scope='session')
def rbc_dag():
    """Session-scoped RBC example model: returns whatever rbc.dag() provides, shared across tests."""
    return rbc.dag()
@pytest.fixture(scope='session')
def krusell_smith_dag():
    """Session-scoped Krusell-Smith example model from krusell_smith.dag(), shared across tests."""
    return krusell_smith.dag()
@pytest.fixture(scope='session')
def one_asset_hank_dag():
    """Session-scoped one-asset HANK example model from hank.dag(), shared across tests."""
    return hank.dag()
@pytest.fixture(scope='session')
def two_asset_hank_dag():
    """Session-scoped two-asset HANK example model from two_asset.dag(), shared across tests."""
    return two_asset.dag()
@pytest.fixture(scope='session')
def ks_remapped_dag():
    """Session-scoped remapped Krusell-Smith model (heterogeneous betas) from krusell_smith.remapped_dag()."""
    return krusell_smith.remapped_dag()
| 552 | 16.83871 | 74 | py |
sequence-jacobian | sequence-jacobian-master/tests/__init__.py | """All tests""" | 15 | 15 | 15 | py |
sequence-jacobian | sequence-jacobian-master/tests/robustness/test_steady_state.py | """Tests for steady_state with worse initial guesses, making use of the constrained solution functionality"""
import pytest
import numpy as np
# Filter out warnings when the solver is trying to search in bad regions
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in.*:RuntimeWarning")
def test_hank_steady_state_w_bad_init_guesses_and_bounds(one_asset_hank_dag):
    """One-asset HANK: solve the steady state from deliberately poor (lower, guess, upper)
    triples, relying on the constrained-residual penalty to keep the solver inside the bounds,
    then check the result matches the fixture's reference steady state."""
    dag_ss, ss, dag, *_ = one_asset_hank_dag
    calibration = {"r": 0.005, "rstar": 0.005, "eis": 0.5, "frisch": 0.5, "B": 5.6, "mu": 1.2,
                   "rho_s": 0.966, "sigma_s": 0.5, "kappa": 0.1, "phi": 1.5, "Y": 1, "Z": 1, "L": 1,
                   "pi": 0, "nS": 2, "amax": 150, "nA": 10}
    # (lower bound, initial value, upper bound) triples activate the bounded solver path
    unknowns_ss = {"beta": (0.95, 0.97, 0.999 / (1 + 0.005)), "vphi": (0.001, 1.0, 10.)}
    targets_ss = {"asset_mkt": 0, "labor_mkt": 0}
    # boundary_epsilon / penalty_scale tune the linear-continuation penalty for out-of-bounds guesses
    cali = dag_ss.solve_steady_state(calibration, unknowns_ss, targets_ss, solver="hybr",
                                     constrained_kwargs={"boundary_epsilon": 5e-3, "penalty_scale": 100})
    ss_ref = dag.steady_state(cali)
    for k in ss.keys():
        assert np.all(np.isclose(ss[k], ss_ref[k]))
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in.*:RuntimeWarning")
def test_two_asset_steady_state_w_bad_init_guesses_and_bounds(two_asset_hank_dag):
    """Two-asset HANK: solve the steady state with the custom Broyden solver from rough
    scalar initial guesses and check it matches the fixture's reference steady state."""
    dag_ss, ss, dag, *_ = two_asset_hank_dag
    # Steady State
    calibration = {"Y": 1., "r": 0.0125, "rstar": 0.0125, "tot_wealth": 14, "delta": 0.02,
                   "kappap": 0.1, "muw": 1.1, 'N': 1.0, 'K': 10., 'pi': 0.0,
                   "Bh": 1.04, "Bg": 2.8, "G": 0.2, "eis": 0.5, "frisch": 1, "chi0": 0.25, "chi2": 2,
                   "epsI": 4, "omega": 0.005, "kappaw": 0.1, "phi": 1.5, "nZ": 3, "nB": 10, "nA": 16,
                   "nK": 4, "bmax": 50, "amax": 4000, "kmax": 1, "rho_z": 0.966, "sigma_z": 0.92}
    unknowns_ss = {"beta": 0.976, "chi1": 6.5}
    # target B = "Bh": liquid assets equal to the calibrated household bond level
    targets_ss = {"asset_mkt": 0., "B": "Bh"}
    cali = dag_ss.solve_steady_state(calibration, unknowns_ss, targets_ss,
                                     solver="broyden_custom")
    ss_ref = dag.steady_state(cali)
    for k in ss.keys():
        assert np.all(np.isclose(ss[k], ss_ref[k]))
| 2,187 | 51.095238 | 109 | py |
sequence-jacobian | sequence-jacobian-master/tests/robustness/__init__.py | """Tests to check for code robustness, including error checking and attempts to use models with bad initializations."""
| 120 | 59.5 | 119 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_remap.py | import numpy as np
from sequence_jacobian import simple, solved, combine
@simple
def matching(theta, ell, kappa):
    # job-finding rate as a function of labor-market tightness theta, bounded below 1
    # (presumably a den Haan-Ramey-Watson matching function with curvature ell — confirm)
    f = theta / (1 + theta ** ell) ** (1 / ell)
    # vacancy-filling rate and the implied cost of making one hire
    qfill = f / theta
    hiring_cost = kappa / qfill
    return f, qfill, hiring_cost
@solved(unknowns={'h': (0, 1)}, targets=['jc_res'])
def job_creation(h, w, beta, s, hiring_cost):
    # job-creation condition: flow surplus (h - w) plus the discounted hiring cost saved if
    # the match survives (prob 1 - s) must equal today's hiring cost; (+1) is sequence-jacobian
    # notation for next-period values
    jc_res = h - w + beta * (1 - s(+1)) * hiring_cost(+1) - hiring_cost
    return jc_res
@solved(unknowns={'N': (0.5, 1)}, targets=['N_lom'])
def labor_lom(h, w, N, s, qfill, f, theta, hiring_cost):
    # employment law of motion: surviving matches plus new hires out of non-employment
    N_lom = (1 - s * (1 - f)) * N(-1) + f * (1 - N(-1)) - N
    U = 1 - N
    v = theta * U  # vacancies implied by tightness theta = v / U
    vacancy_cost = hiring_cost * qfill * v
    # profits of job owners: match surplus net of total vacancy-posting costs
    Div_labor = (h - w) * N - vacancy_cost
    return N_lom, U, v, vacancy_cost, Div_labor
@simple
def dmp_aggregate(U_men, U_women, Div_labor_men, Div_labor_women, vacancy_cost_men, vacancy_cost_women):
    # aggregate the two remapped labor markets with equal (1/2) population weights
    U = (U_men + U_women) / 2
    Div_labor = (Div_labor_men + Div_labor_women) / 2
    vacancy_cost = (vacancy_cost_men + vacancy_cost_women) / 2
    return U, Div_labor, vacancy_cost
def test_remap_combined_block():
    """Build gender-specific copies of the DMP block via rename/remap and recombine them
    with the aggregator; the test passes if construction raises no errors."""
    dmp = combine([matching, job_creation, labor_lom], name='DMP')
    dmp_men = dmp.rename(suffix='_men')
    dmp_women = dmp.rename(suffix='_women')
    # remap some inputs and all outputs
    to_remap = ['theta', 'ell', 's'] + list(dmp_men.outputs)
    dmp_men = dmp_men.remap({k: k + '_men' for k in to_remap})
    dmp_women = dmp_women.remap({k: k + '_women' for k in to_remap})
    # combine remapped blocks
    dmp_all = combine([dmp_men, dmp_women, dmp_aggregate], name='dmp_all')
| 1,605 | 31.77551 | 104 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_two_asset.py | """Test the two asset HANK steady state computation"""
import numpy as np
from sequence_jacobian.hetblocks import hh_twoasset as hh
from sequence_jacobian import utilities as utils
def test_hank_ss():
    """Regression test: pin the household aggregates A (illiquid assets), B (liquid assets),
    and UCE from a single partial-equilibrium backward-iteration run."""
    A, B, UCE = hank_ss_singlerun()
    assert np.isclose(A, 12.526539492650361)
    assert np.isclose(B, 1.0840860793350566)
    assert np.isclose(UCE, 4.5102870939550055)
def hank_ss_singlerun(beta=0.976, r=0.0125, tot_wealth=14, K=10, delta=0.02, Bg=2.8, G=0.2,
                      eis=0.5, chi0=0.25, chi1=6.5, chi2=2, omega=0.005, nZ=3, nB=50,
                      nA=70, nK=50, bmax=50, amax=4000, kmax=1, rho_z=0.966, sigma_z=0.92):
    """Mostly cribbed from two_asset.hank_ss(), but just does backward iteration to get
    a partial equilibrium household steady state given parameters, not solving for equilibrium.
    Convenient for testing.

    Returns the household aggregates (A, B, UCE) at the given parameters."""
    # set up grid
    b_grid = utils.discretize.agrid(amax=bmax, n=nB)
    a_grid = utils.discretize.agrid(amax=amax, n=nA)
    # k_grid reversed into descending order; .copy() for a contiguous array
    k_grid = utils.discretize.agrid(amax=kmax, n=nK)[::-1].copy()
    e_grid, _, Pi = utils.discretize.markov_rouwenhorst(rho=rho_z, sigma=sigma_z, N=nZ)
    # solve analytically what we can
    mc = 1 - r * (tot_wealth - Bg - K)  # marginal cost implied by the wealth identity
    alpha = (r + delta) * K / mc        # capital share consistent with K, r, delta, mc
    w = (1 - alpha) * mc
    tax = (r * Bg + G) / w              # labor tax balancing the government budget
    ra = r
    rb = r - omega                      # liquid return net of the intermediation spread omega
    z_grid = (1 - tax) * w * e_grid     # after-tax labor income by productivity state
    # figure out initializer
    calibration = {'Pi': Pi, 'a_grid': a_grid, 'b_grid': b_grid, 'e_grid': e_grid,
                   'z_grid': z_grid, 'k_grid': k_grid, 'beta': beta, 'N': 1.0,
                   'tax': tax, 'w': w, 'eis': eis, 'rb': rb, 'ra': ra,
                   'chi0': chi0, 'chi1': chi1, 'chi2': chi2}
    out = hh.hh.steady_state(calibration)
    return out['A'], out['B'], out['UCE']
def test_Psi():
    """Check the analytical portfolio-adjustment cost and its derivatives against a
    known-correct reference and two-sided numerical differentiation."""
    np.random.seed(41234)
    chi0, chi1, chi2 = 0.25, 6.5, 2.3
    ra = 0.05
    a = np.random.rand(50) + 1
    ap = np.random.rand(50) + 1
    oPsi, oPsi1, oPsi2 = hh.get_Psi_and_deriv(ap, a, ra, chi0, chi1, chi2)
    Psi = Psi_correct(ap, a, ra, chi0, chi1, chi2)
    assert np.allclose(oPsi, Psi)
    # compare two-sided numerical derivative to our analytical one
    # numerical doesn't work well at kink of "abs" function, so this would fail
    # for some seeds if chi2 was less than 2
    Psi1 = (Psi_correct(ap+1E-4, a, ra, chi0, chi1, chi2) -
            Psi_correct(ap-1E-4, a, ra, chi0, chi1, chi2)) / 2E-4
    assert np.allclose(oPsi1, Psi1)
    Psi2 = (Psi_correct(ap, a+1E-4, ra, chi0, chi1, chi2) -
            Psi_correct(ap, a-1E-4, ra, chi0, chi1, chi2)) / 2E-4
    assert np.allclose(oPsi2, Psi2)
def Psi_correct(ap, a, ra, chi0, chi1, chi2):
    """Reference adjustment-cost function: known-correct version of Psi(a', a), with the
    denominator raised to chi2 - 1 rather than 1 (which was the error in the original code)."""
    a_with_return = (1 + ra) * a
    adjustment = np.abs(ap - a_with_return)
    return chi1 / chi2 * adjustment ** chi2 / (a_with_return + chi0) ** (chi2 - 1)
sequence-jacobian | sequence-jacobian-master/tests/base/test_steady_state.py | """Test all models' steady state computations"""
import numpy as np
from sequence_jacobian.examples import rbc, krusell_smith, hank, two_asset
# def test_rbc_steady_state(rbc_dag):
# _, ss, *_ = rbc_dag
# ss_ref = rbc.rbc_ss()
# assert set(ss.keys()) == set(ss_ref.keys())
# for k in ss.keys():
# assert np.all(np.isclose(ss[k], ss_ref[k]))
# def test_ks_steady_state(krusell_smith_dag):
# _, ss, *_ = krusell_smith_dag
# ss_ref = krusell_smith.ks_ss(nS=2, nA=10, amax=200)
# assert set(ss.keys()) == set(ss_ref.keys())
# for k in ss.keys():
# assert np.all(np.isclose(ss[k], ss_ref[k]))
# def test_hank_steady_state(one_asset_hank_dag):
# _, ss, *_ = one_asset_hank_dag
# ss_ref = hank.hank_ss(nS=2, nA=10, amax=150)
# assert set(ss.keys()) == set(ss_ref.keys())
# for k in ss.keys():
# assert np.all(np.isclose(ss[k], ss_ref[k]))
# def test_two_asset_steady_state(two_asset_hank_dag):
# _, ss, *_ = two_asset_hank_dag
# ss_ref = two_asset.two_asset_ss(nZ=3, nB=10, nA=16, nK=4, verbose=False)
# assert set(ss.keys()) == set(ss_ref.keys())
# for k in ss.keys():
# assert np.all(np.isclose(ss[k], ss_ref[k]))
# def test_remap_steady_state(ks_remapped_dag):
# _, _, _, _, ss = ks_remapped_dag
# assert ss['beta_impatient'] < ss['beta_patient']
# assert ss['A_impatient'] < ss['A_patient']
| 1,408 | 31.022727 | 78 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_simple_block.py | """Test SimpleBlock functionality"""
import copy
import numpy as np
import pytest
from sequence_jacobian import simple
from sequence_jacobian.classes.steady_state_dict import SteadyStateDict
@simple
def F(K, L, Z, alpha):
    # Cobb-Douglas production with one-period time-to-build: K(-1) is last period's capital
    Y = Z * K(-1)**alpha * L**(1-alpha)
    # NOTE(review): the marginal products divide by current-period K and L, while Y uses
    # K(-1) — confirm this asymmetry is intended in the test fixture
    FK = alpha * Y / K
    FL = (1-alpha) * Y / L
    return Y, FK, FL
@simple
def investment(Q, K, r, N, mc, Z, delta, epsI, alpha):
    # Tobin's Q first-order condition for investment with quadratic adjustment costs (epsI)
    inv = (K/K(-1) - 1) / (delta * epsI) + 1 - Q
    # valuation equation: next period's marginal revenue product of capital plus
    # adjustment-cost and resale terms, net of the required return on Q
    val = alpha * Z(+1) * (N(+1) / K) ** (1-alpha) * mc(+1) - (K(+1)/K -
          (1-delta) + (K(+1)/K - 1)**2 / (2*delta*epsI)) + K(+1)/K*Q(+1) - (1 + r(+1))*Q
    return inv, val
@simple
def taylor(r, pi, phi):
    # Taylor rule in deviations from steady state; `.ss` is sequence-jacobian notation
    # for a variable's steady-state value
    i = r.ss + phi * (pi - pi.ss)
    return i
@pytest.mark.parametrize("block,ss", [(F, SteadyStateDict({"K": 1, "L": 1, "Z": 1, "alpha": 0.5})),
                                      (investment, SteadyStateDict({"Q": 1, "K": 1, "r": 0.05, "N": 1, "mc": 1,
                                                                    "Z": 1, "delta": 0.05, "epsI": 2, "alpha": 0.5})),
                                      (taylor, SteadyStateDict({"r": 0.05, "pi": 0.01, "phi": 1.5}))])
def test_block_consistency(block, ss):
    """Make sure ss, td, and jac methods are all consistent with each other.
    Requires that all inputs of simple block allow calculating Jacobians"""
    # get ss output
    ss_results = block.steady_state(ss)
    # now if we put in constant inputs, td should give us the same!
    td_results = block.impulse_nonlinear(ss_results, {k: np.zeros(20) for k in ss.keys()})
    for v in td_results.values():
        assert np.all(v == 0)
    # now get the Jacobian
    J = block.jacobian(ss, inputs=block.inputs)
    # now perturb the steady state by small random vectors
    # and verify that the second-order numerical derivative implied by .td
    # is equivalent to what we get from jac
    h = 1E-5  # step size for the symmetric difference below
    all_shocks = {i: np.random.rand(10) for i in block.inputs}
    td_up = block.impulse_nonlinear(ss_results, {i: h*shock for i, shock in all_shocks.items()})
    td_dn = block.impulse_nonlinear(ss_results, {i: -h*shock for i, shock in all_shocks.items()})
    linear_impulses = {o: (td_up[o] - td_dn[o])/(2*h) for o in block.outputs}
    linear_impulses_from_jac = {o: sum(J[o][i] @ all_shocks[i] for i in all_shocks if i in J[o]) for o in block.outputs}
    for o in linear_impulses:
        assert np.all(np.abs(linear_impulses[o] - linear_impulses_from_jac[o]) < 1E-5)
| 2,476 | 37.107692 | 120 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_dchoice.py | '''
SIM model with labor force participation choice
- state space: (s, x, e, a)
- s is employment
- 0: employed, 1: unemployed, 2: out of labor force
- x is matching
- 0: matched, 1: unmatched
- e is labor productivity
- a is assets
'''
import numpy as np
from numba import njit
from sequence_jacobian.blocks.stage_block import StageBlock
from sequence_jacobian.blocks.support.stages import Continuous1D, ExogenousMaker, LogitChoice
from sequence_jacobian import markov_rouwenhorst, agrid
from sequence_jacobian.classes.impulse_dict import ImpulseDict
from sequence_jacobian.utilities.misc import nonconcave
from sequence_jacobian.utilities.interpolate import interpolate_coord_njit, apply_coord_njit, interpolate_point
'''Setup: utility function, hetinputs, initializer'''
@njit
def util(c, eis):
    # CRRA flow utility; the log case covers eis == 1 where the power form is undefined
    if eis == 1:
        return np.log(c)
    return c ** (1 - 1 / eis) / (1 - 1 / eis)
def make_grids(rho_e, sd_e, nE, amin, amax, nA):
    # hetinput: Rouwenhorst discretization of the AR(1) productivity process plus
    # the asset grid from sequence-jacobian's agrid
    e_grid, e_dist, Pi_e = markov_rouwenhorst(rho=rho_e, sigma=sd_e, N=nE)
    a_grid = agrid(amin=amin, amax=amax, n=nA)
    return e_grid, e_dist, Pi_e, a_grid
def labor_income(a_grid, e_grid, atw, b, s, f, r):
    """Hetinput: income y (employment state x productivity), cash on hand coh
    (employment state x productivity x assets), and the s-transition matrix Pi_s."""
    # income by employment state: after-tax wage when employed, benefit b otherwise
    income_by_state = np.array([atw, b, b])
    y = np.outer(income_by_state, e_grid)
    coh = (1 + r) * a_grid + y[..., np.newaxis]
    # rows: employed (separate with prob s), unemployed (find with prob f), out of labor force
    Pi_s = np.array([[1 - s, s],
                     [f, 1 - f],
                     [0, 1]])
    return y, coh, Pi_s
def backward_init(coh, a_grid, eis):
    # initializer: value of consuming 10% of cash on hand, scaled up (divided by 0.01)
    # as a rough infinite-horizon level
    V = util(0.1 * coh, eis) / 0.01
    # Va from finite differences along the asset grid: central in the interior,
    # one-sided at the two endpoints
    Va = np.empty_like(V)
    Va[..., 1:-1] = (V[..., 2:] - V[..., :-2]) / (a_grid[2:] - a_grid[:-2])
    Va[..., 0] = (V[..., 1] - V[..., 0]) / (a_grid[1] - a_grid[0])
    Va[..., -1] = (V[..., -1] - V[..., -2]) / (a_grid[-1] - a_grid[-2])
    return V, Va
'''Consumption-savings stage: : (s, e, a) -> (s, e, a')'''
def consav(V, Va, a_grid, coh, y, r, beta, eis):
    """DC-EGM algorithm: EGM step, then an upper-envelope step to handle the
    non-concavity induced by the upstream discrete choice."""
    # EGM step: invert the Euler equation on the endogenous grid
    W = beta * V
    c_endog = (beta * Va) ** (-eis)
    a_endog = (c_endog + a_grid[np.newaxis, np.newaxis, :] - y[:, :, np.newaxis]) / (1 + r)
    # upper envelope step maps back to the exogenous asset grid
    V, c = upper_envelope(Va, W, a_endog, c_endog, coh, a_grid, eis)
    # envelope condition gives the updated marginal value; asset policy from budget
    Va = (1 + r) * c ** (-1 / eis)
    a = coh - c
    return V, Va, a, c
def upper_envelope(Va, W, a_endo, c_endo, coh, a_grid, *args):
    """Wrapper: flatten all leading dimensions, run the njit upper-envelope kernel along
    the asset dimension, and restore the original shapes. *args are forwarded to util()."""
    # identify bounds of nonconcave region
    ilower, iupper = nonconcave(Va)
    # upper envelope
    shape = W.shape
    W = W.reshape((-1, shape[-1]))
    a_endo = a_endo.reshape((-1, shape[-1]))
    c_endo = c_endo.reshape((-1, shape[-1]))
    coh = coh.reshape((-1, shape[-1]))
    ilower = ilower.reshape(-1)
    iupper = iupper.reshape(-1)
    V, c = upper_envelope_core(ilower, iupper, W, a_endo, c_endo, coh, a_grid, *args)
    return V.reshape(shape), c.reshape(shape)
@njit
def upper_envelope_core(ilower, iupper, W, a_endo, c_endo, coh, a_grid, *args):
    """Interpolate value function and consumption to exogenous grid.

    ilower/iupper: per-row index bounds of the nonconcave region of the endogenous grid
    W: continuation value on the endogenous grid; a_endo/c_endo: endogenous grid/consumption
    coh: cash on hand on the exogenous grid; a_grid: exogenous asset grid
    *args: extra arguments forwarded to util() (here: eis)
    """
    nB, nA = W.shape
    c = np.zeros_like(W)
    V = -np.inf * np.ones_like(W)
    for ib in range(nB):
        ilower_cur = ilower[ib]
        iupper_cur = iupper[ib]
        # Below nonconcave region: exploit monotonicity
        if ilower_cur > 0:
            ai, api = interpolate_coord_njit(a_endo[ib, :ilower_cur], a_grid[:ilower_cur])
            c0 = apply_coord_njit(ai, api, c_endo[ib, :ilower_cur])
            W0 = apply_coord_njit(ai, api, W[ib, :ilower_cur])
            c[ib, :ilower_cur] = c0
            V[ib, :ilower_cur] = util(c0, *args) + W0
        # Nonconcave region: check everything
        for ia in range(ilower_cur, iupper_cur):
            acur = a_grid[ia]
            for ja in range(nA - 1):
                ap_low = a_endo[ib, ja]
                ap_high = a_endo[ib, ja + 1]
                # use this segment if acur lies between its endpoints (either orientation),
                # or extrapolate off the final segment when acur exceeds the grid
                interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high)
                extrap = (ja == nA - 2) and (acur > a_endo[ib, nA - 1])
                if interp or extrap:
                    c0 = interpolate_point(acur, ap_low, ap_high, c_endo[ib, ja], c_endo[ib, ja+1])
                    W0 = interpolate_point(acur, ap_low, ap_high, W[ib, ja], W[ib, ja + 1])
                    V0 = util(c0, *args) + W0
                    # keep the best candidate across segments: this is the upper envelope
                    if V0 > V[ib, ia]:
                        V[ib, ia] = V0
                        c[ib, ia] = c0
        # Above nonconcave region: exploit monotonicity
        if iupper_cur > 0:
            ai, api = interpolate_coord_njit(a_endo[ib, iupper_cur:], a_grid[iupper_cur:])
            c0 = apply_coord_njit(ai, api, c_endo[ib, iupper_cur:])
            W0 = apply_coord_njit(ai, api, W[ib, iupper_cur:])
            c[ib, iupper_cur:] = c0
            V[ib, iupper_cur:] = util(c0, *args) + W0
        # Enforce borrowing constraint
        ia = 0
        while ia < nA and a_grid[ia] <= a_endo[ib, 0]:
            c[ib, ia] = coh[ib, ia]
            V[ib, ia] = util(c[ib, ia], *args) + W[ib, 0]
            ia += 1
    return V, c
'''Logit choice stage: (x, z, a) -> (s, z, a)'''
def participation(V, vphi, chi):
    """Flow-utility adjustments for the x -> s participation choice; -inf entries
    make the corresponding transitions infeasible for the logit choice stage."""
    nZ, nA = V.shape[-2:]
    flow_u = np.zeros((3, 2, nZ, nA))  # indexed (s, x, z, a)
    flow_u[0] = -vphi         # employment carries disutility of work
    flow_u[1] = -chi          # unemployment carries search cost
    flow_u[0, 1] = -np.inf    # unmatched agents cannot choose employment
    return flow_u
'''Put stages together'''
# Stages run within a period in the order listed in StageBlock below: productivity
# shock, then the search/match shock, then the logit participation choice, then
# the continuous consumption-savings problem.
# NOTE(review): `index` presumably identifies which state dimension the stage acts on
# (0 = employment/match state, 1 = productivity) — confirm against the stages API
consav_stage = Continuous1D(backward=['Va', 'V'], policy='a', f=consav, name='consav')
labsup_stage = LogitChoice(value='V', backward='Va', index=0,
                           taste_shock_scale='taste_shock',
                           f=participation, name='dchoice')
search_stage = ExogenousMaker(markov_name='Pi_s', index=0, name='search_shock')
prod_stage = ExogenousMaker(markov_name='Pi_e', index=1, name='prod_shock')
hh = StageBlock([prod_stage, search_stage, labsup_stage, consav_stage],
                backward_init=backward_init, hetinputs=[make_grids, labor_income], name='household')
def test_runs():
    """Smoke/consistency test: the steady state is invariant to the backward initializer,
    and small nonlinear impulse responses agree with the linear ones."""
    calibration = {'taste_shock': 0.01, 'r': 0.005, 'beta': 0.97, 'eis': 0.5,
                   'vphi': 0.3, 'chi': 0.3, 'rho_e': 0.95, 'sd_e': 0.5, 'nE': 7, 'amin': .0, 'amax': 200.0, 'nA': 200, 'atw': 1.0, 'b': 0.5, 's': 0.1, 'f': 0.4}
    ss1 = hh.steady_state(calibration)
    # re-solve starting from a perturbed value function
    ss2 = hh.steady_state({**calibration,
                           'V': 0.9*ss1.internals['household']['consav']['V'],
                           'Va': 0.9*ss1.internals['household']['consav']['Va']})
    # test steady-state equivalence (from different starting point)
    assert np.isclose(ss1['A'], ss2['A'])
    assert np.isclose(ss1['C'], ss2['C'])
    assert np.allclose(ss1.internals['household']['consav']['D'], ss2.internals['household']['consav']['D'])
    assert np.allclose(ss1.internals['household']['consav']['a'], ss2.internals['household']['consav']['a'])
    assert np.allclose(ss1.internals['household']['consav']['c'], ss2.internals['household']['consav']['c'])
    inputs = ['r', 'atw', 'f']
    outputs = ['A', 'C']
    T = 50
    J = hh.jacobian(ss1, inputs, outputs, T)
    # impulse responses
    shock = ImpulseDict({'f': 0.5 ** np.arange(50)})
    td_lin = hh.impulse_linear(ss1, shock, outputs=['C'])
    # scale the shock down so the nonlinear response is in the linear regime, and
    # subtract a zero-shock "ghost" run to net out numerical drift
    td_nonlin = hh.impulse_nonlinear(ss1, shock * 1E-4, outputs=['C'])
    td_ghost = hh.impulse_nonlinear(ss1, shock * 0.0, outputs=['C'])
    td_nonlin = td_nonlin - td_ghost
    assert np.allclose(td_lin['C'], td_nonlin['C'] / 1E-4, atol=1E-5)
| 7,609 | 36.487685 | 160 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_estimation.py | """Test all models' estimation calculations"""
''
import pytest
import numpy as np
from sequence_jacobian import estimation
# See test_determinacy.py for the to-do describing this suppression
@pytest.mark.filterwarnings("ignore:.*cannot be safely interpreted as an integer.*:DeprecationWarning")
def test_krusell_smith_estimation(krusell_smith_dag):
    """Estimation pipeline: stack IRFs to a persistent and a transitory TFP shock,
    build the autocovariance function, and check the log-likelihood of random
    data against a frozen regression value."""
    _, ss, ks_model, unknowns, targets, exogenous = krusell_smith_dag
    np.random.seed(41234)
    T = 50
    G = ks_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T)
    # Step 1: Stacked impulse responses
    rho = 0.9
    sigma_persist = 0.1
    sigma_trans = 0.2
    dZ1 = rho**(np.arange(T))  # persistent AR(1) shock path
    dY1, dC1, dK1 = G['Y']['Z'] @ dZ1, G['C']['Z'] @ dZ1, G['K']['Z'] @ dZ1
    dX1 = np.stack([dZ1, dY1, dC1, dK1], axis=1)
    dZ2 = np.arange(T) == 0  # purely transitory one-period shock
    dY2, dC2, dK2 = G['Y']['Z'] @ dZ2, G['C']['Z'] @ dZ2, G['K']['Z'] @ dZ2
    dX2 = np.stack([dZ2, dY2, dC2, dK2], axis=1)
    dX = np.stack([dX1, dX2], axis=2)
    # Step 2: Obtain covariance at all leads and lags
    sigmas = np.array([sigma_persist, sigma_trans])
    Sigma = estimation.all_covariances(dX, sigmas)
    # Step 3: Log-likelihood calculation
    # random 100 observations
    Y = np.random.randn(100, 4)
    # 0.05 measurement error in each variable
    sigma_measurement = np.full(4, 0.05)
    # calculate log-likelihood and compare against frozen regression value
    ll = estimation.log_likelihood(Y, Sigma, sigma_measurement)
    assert np.isclose(ll, -59921.410111251025)
sequence-jacobian | sequence-jacobian-master/tests/base/test_jacobian.py | """Test all models' Jacobian calculations"""
import numpy as np
def test_ks_jac(krusell_smith_dag):
    """solve_jacobian's general-equilibrium Jacobian matches a manual chain-rule
    construction around the asset-market clearing condition H(K, Z) = 0."""
    _, ss, ks_model, unknowns, targets, exogenous = krusell_smith_dag
    household, firm = ks_model['hh'], ks_model['firm']
    T = 10
    # Automatically calculate the general equilibrium Jacobian
    G2 = ks_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T)
    # Manually calculate the general equilibrium Jacobian
    J_firm = firm.jacobian(ss, inputs=['K', 'Z'])
    J_ha = household.jacobian(ss, T=T, inputs=['r', 'w'])
    # total derivative of household asset supply w.r.t. K and Z, through prices (r, w)
    J_curlyK_K = J_ha['A']['r'] @ J_firm['r']['K'] + J_ha['A']['w'] @ J_firm['w']['K']
    J_curlyK_Z = J_ha['A']['r'] @ J_firm['r']['Z'] + J_ha['A']['w'] @ J_firm['w']['Z']
    J_curlyK = {'curlyK': {'K': J_curlyK_K, 'Z': J_curlyK_Z}}
    # market-clearing target H = curlyK - K
    H_K = J_curlyK['curlyK']['K'] - np.eye(T)
    H_Z = J_curlyK['curlyK']['Z']
    G = {'K': -np.linalg.solve(H_K, H_Z)}  # H_K^(-1)H_Z
    # propagate dK back to prices and aggregates
    G['r'] = J_firm['r']['Z'] + J_firm['r']['K'] @ G['K']
    G['w'] = J_firm['w']['Z'] + J_firm['w']['K'] @ G['K']
    G['Y'] = J_firm['Y']['Z'] + J_firm['Y']['K'] @ G['K']
    G['C'] = J_ha['C']['r'] @ G['r'] + J_ha['C']['w'] @ G['w']
    for o in G:
        assert np.allclose(G2[o]['Z'], G[o])
# TODO: decide whether to get rid of this or revise it with manual solve_jacobian stuff
# def test_hank_jac(one_asset_hank_dag):
# hank_model, exogenous, unknowns, targets, ss = one_asset_hank_dag
# T = 10
# # Automatically calculate the general equilibrium Jacobian
# G2 = hank_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T)
# # Manually calculate the general equilibrium Jacobian
# curlyJs, required = curlyJ_sorted(hank_model.blocks, unknowns + exogenous, ss, T)
# J_curlyH_U = forward_accumulate(curlyJs, unknowns, targets, required)
# J_curlyH_Z = forward_accumulate(curlyJs, exogenous, targets, required)
# H_U = J_curlyH_U[targets, unknowns].pack(T)
# H_Z = J_curlyH_Z[targets, exogenous].pack(T)
# G_U = JacobianDict.unpack(-np.linalg.solve(H_U, H_Z), unknowns, exogenous, T)
# curlyJs = [G_U] + curlyJs
# outputs = set().union(*(curlyJ.outputs for curlyJ in curlyJs)) - set(targets)
# G = forward_accumulate(curlyJs, exogenous, outputs, required | set(unknowns))
# for o in G:
# for i in G[o]:
# assert np.allclose(G[o][i], G2[o][i])
def test_fake_news_v_direct_method(one_asset_hank_dag):
    """Fake-news-algorithm Jacobian of the household block matches brute-force
    finite differencing of nonlinear impulses w.r.t. a shock at each date t."""
    hank_model, ss, *_ = one_asset_hank_dag
    household = hank_model['hh']
    T = 40
    exogenous = ['r']
    output_list = household.non_backward_outputs
    h = 1E-4  # one-sided finite-difference step
    Js = household.jacobian(ss, exogenous, T=T)
    Js_direct = {o.upper(): {i: np.empty((T, T)) for i in exogenous} for o in output_list}
    # run td once without any shocks to get paths to subtract against
    # (better than subtracting by ss since ss not exact)
    # .impulse_nonlinear requires at least one input 'shock', so we put in steady-state w
    td_noshock = household.impulse_nonlinear(ss, {'w': np.zeros(T)})
    for i in exogenous:
        # simulate with respect to a shock at each date up to T
        for t in range(T):
            td_out = household.impulse_nonlinear(ss, {i: h * (np.arange(T) == t)})
            # store results as column t of J[o][i] for each outcome o
            for o in output_list:
                Js_direct[o.upper()][i][:, t] = (td_out[o.upper()] - td_noshock[o.upper()]) / h
    assert np.linalg.norm(Js['C']['r'] - Js_direct['C']['r'], np.inf) < 3e-4
| 3,585 | 40.697674 | 95 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_transitional_dynamics.py | """Test all models' non-linear transitional dynamics computations"""
import numpy as np
from sequence_jacobian import combine
from sequence_jacobian.examples import two_asset
from sequence_jacobian.hetblocks import hh_twoasset as hh
# TODO: Figure out a more robust way to check similarity of the linear and non-linear solution.
# As of now just checking that the tolerance for difference (by infinity norm) is below a manually checked threshold
def test_rbc_td(rbc_dag):
    """RBC model: nonlinear impulse responses to a TFP shock (surprise and
    news variants) agree with the linear responses to first order."""
    rbc_model, ss, unknowns, targets, exogenous = rbc_dag
    T, impact, rho, news = 30, 0.01, 0.8, 10
    G = rbc_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T)
    dZ = np.empty((T, 2))
    dZ[:, 0] = impact * ss['Z'] * rho**np.arange(T)  # surprise AR(1) shock
    dZ[:, 1] = np.concatenate((np.zeros(news), dZ[:-news, 0]))  # same shock announced `news` periods ahead
    dC = 100 * G['C']['Z'] @ dZ / ss['C']
    td_nonlin = rbc_model.solve_impulse_nonlinear(ss, unknowns, targets, inputs={"Z": dZ[:, 0]}, outputs=['C'])
    td_nonlin_news = rbc_model.solve_impulse_nonlinear(ss, unknowns, targets, inputs={"Z": dZ[:, 1]}, outputs=['C'])
    dC_nonlin = 100 * td_nonlin['C'] / ss['C']
    dC_nonlin_news = 100 * td_nonlin_news['C'] / ss['C']
    assert np.linalg.norm(dC[:, 0] - dC_nonlin, np.inf) < 3e-2
    assert np.linalg.norm(dC[:, 1] - dC_nonlin_news, np.inf) < 7e-2
def test_ks_td(krusell_smith_dag):
    """Krusell-Smith model: nonlinear transition dynamics track the linear
    solution, with a tolerance that grows with the size of the TFP shock."""
    _, ss, ks_model, unknowns, targets, exogenous = krusell_smith_dag
    horizon = 30
    G = ks_model.solve_jacobian(ss, unknowns, targets, exogenous, T=horizon)
    cases = [(0.01, 7e-3), (0.1, 0.6)]
    for shock_size, tol in cases:
        # AR(1) TFP shock path scaled by shock_size
        dZ = shock_size * 0.8 ** np.arange(horizon)
        td_nonlin = ks_model.solve_impulse_nonlinear(ss, unknowns, targets, {"Z": dZ})
        # compare interest-rate responses in basis points
        dr_nonlin = 10000 * td_nonlin['r']
        dr_lin = 10000 * G['r']['Z'] @ dZ
        gap = np.linalg.norm(dr_nonlin - dr_lin, np.inf)
        assert gap < tol
def test_hank_td(one_asset_hank_dag):
    """One-asset HANK: nonlinear consumption response to a monetary shock
    matches the linear one (household Jacobian precomputed and reused)."""
    _, ss, hank_model, unknowns, targets, exogenous = one_asset_hank_dag
    T = 30
    household = hank_model['hh']
    # precompute the expensive household Jacobian once and pass it to both solvers
    J_ha = household.jacobian(ss=ss, T=T, inputs=['Div', 'Tax', 'r', 'w'])
    G = hank_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T, Js={'hh': J_ha})
    rho_r, sig_r = 0.61, -0.01/4  # persistence and impact (quarterly) of the rstar shock
    drstar = sig_r * rho_r ** (np.arange(T))
    td_nonlin = hank_model.solve_impulse_nonlinear(ss, unknowns, targets, {"rstar": drstar}, Js={'hh': J_ha})
    dC_nonlin = 100 * td_nonlin['C'] / ss['C']
    dC_lin = 100 * G['C']['rstar'] @ drstar / ss['C']
    assert np.linalg.norm(dC_nonlin - dC_lin, np.inf) < 3e-3
# TODO: needs to compute Jacobian of hetoutput `Chi`
def test_two_asset_td(two_asset_hank_dag):
    """Two-asset HANK: nonlinear output response stays within a
    shock-size-dependent distance of the linear response."""
    _, ss, two_asset_model, unknowns, targets, exogenous = two_asset_hank_dag
    T = 30
    household = two_asset_model['hh']
    # precompute the household Jacobian once and reuse it across solves
    J_ha = household.jacobian(ss=ss, T=T, inputs=['N', 'r', 'ra', 'rb', 'tax', 'w'])
    G = two_asset_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T, Js={'hh': J_ha})
    for shock_size, tol in [(0.1, 3e-4), (1, 2e-2)]:
        drstar = shock_size * -0.0025 * 0.6 ** np.arange(T)
        td_nonlin = two_asset_model.solve_impulse_nonlinear(ss, unknowns, targets, {"rstar": drstar},
                                                            Js={'hh': J_ha})
        dY_nonlin = 100 * td_nonlin['Y']
        dY_lin = 100 * G['Y']['rstar'] @ drstar
        assert np.linalg.norm(dY_nonlin - dY_lin, np.inf) < tol
def test_two_asset_solved_v_simple_td(two_asset_hank_dag):
    """The two-asset model built with SolvedBlocks and the equivalent model
    built only from SimpleBlocks produce the same linear and nonlinear
    output responses to an rstar shock."""
    _, ss, two_asset_model, unknowns, targets, exogenous = two_asset_hank_dag
    # rebuild the model from SimpleBlocks only (more unknowns/targets needed)
    household = hh.hh.add_hetinputs([two_asset.income, two_asset.make_grids])
    blocks_simple = [household, two_asset.pricing, two_asset.arbitrage,
                     two_asset.labor, two_asset.investment, two_asset.dividend,
                     two_asset.taylor, two_asset.fiscal, two_asset.share_value,
                     two_asset.finance, two_asset.wage, two_asset.union,
                     two_asset.mkt_clearing]
    two_asset_model_simple = combine(blocks_simple, name="Two-Asset HANK w/ SimpleBlocks")
    unknowns_simple = ["r", "w", "Y", "pi", "p", "Q", "K"]
    targets_simple = ["asset_mkt", "fisher", "wnkpc", "nkpc", "equity", "inv", "val"]
    T = 30
    household = two_asset_model['hh']
    J_ha = household.jacobian(ss=ss, T=T, inputs=['N', 'r', 'ra', 'rb', 'tax', 'w'])
    G = two_asset_model.solve_jacobian(ss, unknowns, targets, exogenous, T=T, Js={'hh': J_ha})
    G_simple = two_asset_model_simple.solve_jacobian(ss, unknowns_simple, targets_simple, exogenous, T=T,
                                                     Js={'hh': J_ha})
    drstar = -0.0025 * 0.6 ** np.arange(T)
    dY = 100 * G['Y']['rstar'] @ drstar
    td_nonlin = two_asset_model.solve_impulse_nonlinear(ss, unknowns, targets, {"rstar": drstar},
                                                        Js={'hh': J_ha})
    dY_nonlin = 100 * (td_nonlin['Y'] - 1)
    dY_simple = 100 * G_simple['Y']['rstar'] @ drstar
    td_nonlin_simple = two_asset_model_simple.solve_impulse_nonlinear(ss,
                                                                      unknowns_simple, targets_simple,
                                                                      {"rstar": drstar}, Js={'hh': J_ha})
    dY_nonlin_simple = 100 * (td_nonlin_simple['Y'] - 1)
    assert np.linalg.norm(dY_nonlin - dY_nonlin_simple, np.inf) < 2e-7
    assert np.linalg.norm(dY - dY_simple, np.inf) < 0.02
| 5,401 | 42.216 | 118 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_displacement_handlers.py | """Test displacement handler classes: Ignore, IgnoreVector, Displace, Perturb, Reporter"""
import numpy as np
from sequence_jacobian.blocks.support.simple_displacement import (
IgnoreInt, IgnoreFloat, IgnoreVector, Displace, AccumulatedDerivative, numeric_primitive
)
# Define useful helper functions for testing
# Assumes "op" is an actual well-defined arithmetic operator. If necessary, implement more stringent checks
# on the "op" being passed in so nonsense doesn't come out.
# i.e. reverse_op("__round__") doesn't return reverse_op("__ound__")
def reverse_op(op):
    """Return the reflected counterpart of a dunder arithmetic operator name.

    "__add__" -> "__radd__", and "__radd__" -> "__add__". Assumes `op` is a
    well-formed dunder operator name (see the note above).
    """
    prefix, remainder = op[:2], op[2:]
    if remainder[0] == "r":
        # already reflected: strip the leading 'r'
        return prefix + remainder[1:]
    # direct op: insert 'r' after the leading double underscore
    return prefix + "r" + remainder
def apply_unary_op(op, a):
    """Apply a unary dunder operator `op` (e.g. "__neg__") to `a`."""
    bound_method = getattr(a, op)
    return bound_method()
def apply_binary_op(op, a1, a2):
    """Apply binary dunder operator `op` (e.g. "__add__") between `a1` and `a2`.

    Tries the direct operation on `a1` first; if it returns NotImplemented,
    falls back to the reflected operation on `a2`. Unlike the original, each
    operation is evaluated only once — the old code called the successful
    branch twice, doubling the work (and any side effects).

    Raises:
        NotImplementedError: if neither the direct nor the reflected
            operation is supported between `a1` and `a2`.
    """
    result = getattr(a1, op)(a2)
    if result is not NotImplemented:
        return result
    result = getattr(a2, reverse_op(op))(a1)
    if result is not NotImplemented:
        return result
    raise NotImplementedError(f"{op} cannot be performed between {a1} and {a2} directly, and no"
                              f" valid reverse operation exists either.")
def apply_op(op, *args):
    """Dispatch dunder operator `op` to the unary or binary applier by arity."""
    n_args = len(args)
    if n_args == 1:
        return apply_unary_op(op, args[0])
    if n_args == 2:
        return apply_binary_op(op, args[0], args[1])
    raise ValueError(f"apply_op only supports unary or binary operators currently. {n_args} is an invalid"
                     f" number of arguments to provide.")
def test_ignore():
    """IgnoreInt/IgnoreFloat behave like their numeric primitives under unary
    and binary arithmetic, and displacement calls t1(i) are no-ops."""
    # Test unary operations
    arg_singles = [IgnoreInt(1), IgnoreInt(1)(-1), IgnoreFloat(1), IgnoreFloat(1)(-1)]
    for t1 in arg_singles:
        for op in ["__neg__", "__pos__"]:
            assert type(apply_op(op, t1)) == type(t1)
            assert np.all(numeric_primitive(apply_op(op, t1)) == apply_op(op, numeric_primitive(t1)))
    # Test binary operations
    arg_pairs = [(IgnoreInt(1), 1), (1, IgnoreInt(1)), (IgnoreInt(1), IgnoreInt(2)),
                 (IgnoreInt(1)(-1), 1), (1, IgnoreInt(1)(-1)),
                 (IgnoreInt(1)(-1), IgnoreInt(2)), (IgnoreInt(1), IgnoreInt(2)(-1))]
    for pair in arg_pairs:
        t1, t2 = pair
        for op in ["__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__"]:
            # Explicitly ignoring the cases where the mirrored operation is applied to the numeric primitive
            # as `t1', since the standard operator checking will first see if the
            # non-mirrored operation is valid on `t2' and `t1', which it is.
            # This gets around the fact that there doesn't seem to be a simple way to force valid
            # arithmetic operations to be invalid on numeric sub-classes.
            # E.g. when t1 = 1, t2 = IgnoreInt(1), t1.__radd__(t2) will return an int not an IgnoreInt; however
            # in practice when the actual expression is written as t2 + t1, t2.__add__(t1) will be checked first
            # and will be deemed valid, returning an IgnoreInt
            if op in ["__radd__", "__rsub__", "__rmul__"] and not (type(t1) == int and type(t2) == IgnoreInt):
                assert type(apply_op(op, t1, t2)) == IgnoreInt
                assert np.all(numeric_primitive(apply_op(op, t1, t2)) == apply_op(op, numeric_primitive(t1),
                                                                                 numeric_primitive(t2)))
    # same checks for IgnoreFloat, now including division and powers
    arg_pairs = [(IgnoreFloat(1), 1), (1, IgnoreFloat(1)), (IgnoreFloat(1), IgnoreFloat(2)),
                 (IgnoreFloat(1)(-1), 1), (1, IgnoreFloat(1)(-1)),
                 (IgnoreFloat(1)(-1), IgnoreFloat(2)), (IgnoreFloat(1), IgnoreFloat(2)(-1))]
    for pair in arg_pairs:
        t1, t2 = pair
        for op in ["__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__",
                   "__truediv__", "__rtruediv__", "__pow__", "__rpow__"]:
            if op in ["__radd__", "__rsub__", "__rmul__", "__rpow__"] and not (type(t1) == float and type(t2) == IgnoreFloat):
                assert type(apply_op(op, t1, t2)) == IgnoreFloat
                assert np.all(numeric_primitive(apply_op(op, t1, t2)) == apply_op(op, numeric_primitive(t1),
                                                                                 numeric_primitive(t2)))
    # Test call: displacement is ignored
    for t1 in arg_singles:
        assert numeric_primitive(t1) == numeric_primitive(t1(+1))
def test_ignore_vector():
    """IgnoreVector is closed under arithmetic with scalars, Ignore types and
    other IgnoreVectors, matches the underlying ndarray arithmetic, and
    ignores displacement calls."""
    # Test unary operations
    arg_singles = [IgnoreVector(np.array([1, 2, 3])), IgnoreVector(np.array([1, 2, 3]))(-1)]
    for t1 in arg_singles:
        for op in ["__neg__", "__pos__"]:
            assert type(apply_op(op, t1)) == IgnoreVector
            assert np.all(numeric_primitive(apply_op(op, t1)) == apply_op(op, numeric_primitive(t1)))
    # Test binary operations
    arg_pairs = [(IgnoreVector(np.array([1, 2, 3])), 1),
                 (IgnoreVector(np.array([1, 2, 3])), IgnoreFloat(1)),
                 (IgnoreVector(np.array([1, 2, 3])), IgnoreVector(np.array([2, 3, 4]))),
                 (1, IgnoreVector(np.array([1, 2, 3]))),
                 (IgnoreFloat(1), IgnoreVector(np.array([1, 2, 3]))),
                 (IgnoreVector(np.array([1, 2, 3]))(-1), 1),
                 (IgnoreVector(np.array([1, 2, 3]))(-1), IgnoreFloat(1)),
                 (IgnoreVector(np.array([1, 2, 3]))(-1), IgnoreVector(np.array([2, 3, 4]))),
                 (IgnoreVector(np.array([1, 2, 3])), IgnoreVector(np.array([2, 3, 4]))(-1)),
                 (1, IgnoreVector(np.array([1, 2, 3]))(-1)),
                 (IgnoreFloat(1), IgnoreVector(np.array([1, 2, 3]))(-1))]
    for pair in arg_pairs:
        t1, t2 = pair
        for op in ["__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__",
                   "__truediv__", "__rtruediv__", "__pow__", "__rpow__"]:
            assert type(apply_op(op, t1, t2)) == IgnoreVector
            assert np.all(numeric_primitive(apply_op(op, t1, t2)) == apply_op(op, numeric_primitive(t1),
                                                                             numeric_primitive(t2)))
    # Test call: displacement is ignored
    for t1 in arg_singles:
        assert np.all(numeric_primitive(t1) == numeric_primitive(t1(+1)))
def test_displace():
    """Displace arithmetic acts elementwise on the displaced path and carries
    the attached steady-state value through; t1(1) shifts the path forward,
    padding the end with the steady-state value."""
    # TODO: test ss_initial being different from ss
    # Test unary operations
    arg_singles = [Displace(np.array([1, 2, 3]), 2, 2), Displace(np.array([1, 2, 3]), 2, 2)(-1)]
    for t1 in arg_singles:
        for op in ["__neg__", "__pos__"]:
            assert type(apply_op(op, t1)) == Displace
            assert np.all(numeric_primitive(apply_op(op, t1)) == apply_op(op, numeric_primitive(t1)))
    # Test binary operations
    arg_pairs = [(Displace(np.array([1, 2, 3]), 2, 2), 1),
                 (Displace(np.array([1, 2, 3]), 2, 2), IgnoreFloat(1)),
                 (Displace(np.array([1, 2, 3]), 2, 2), Displace(np.array([2, 3, 4]), 3, 3)),
                 (1, Displace(np.array([1, 2, 3]), 2, 2)),
                 (IgnoreFloat(1), Displace(np.array([1, 2, 3]), 2, 2)),
                 (Displace(np.array([1, 2, 3]), 2, 2)(-1), 1),
                 (Displace(np.array([1, 2, 3]), 2, 2)(-1), IgnoreFloat(1)),
                 (Displace(np.array([1, 2, 3]), 2, 2)(-1), Displace(np.array([2, 3, 4]), 3, 3)),
                 (Displace(np.array([1, 2, 3]), 2, 2), Displace(np.array([2, 3, 4]), 3, 3)(-1)),
                 (1, Displace(np.array([1, 2, 3]), 2, 2)(-1)),
                 (IgnoreFloat(1), Displace(np.array([1, 2, 3]), 2, 2)(-1))]
    for pair in arg_pairs:
        t1, t2 = pair
        for op in ["__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__",
                   "__truediv__", "__rtruediv__", "__pow__", "__rpow__"]:
            assert type(apply_op(op, t1, t2)) == Displace
            assert np.all(numeric_primitive(apply_op(op, t1, t2)) == apply_op(op, numeric_primitive(t1),
                                                                             numeric_primitive(t2)))
            # the .ss attribute must follow the same arithmetic as the paths
            assert np.all(numeric_primitive(apply_op(op, t1, t2).ss) ==\
                          apply_op(op, t1.ss if isinstance(t1, Displace) else numeric_primitive(t1),
                                   t2.ss if isinstance(t2, Displace) else numeric_primitive(t2)))
    # Test call: shift forward by one and pad the tail with the ss value
    for t1 in arg_singles:
        t1_manual_displace = np.zeros(len(t1))
        t1_manual_displace[:-1] = numeric_primitive(t1)[1:]
        t1_manual_displace[-1:] = t1.ss
        assert np.all(numeric_primitive(t1(1)) == t1_manual_displace)
def test_accumulated_derivative():
    """AccumulatedDerivative arithmetic: unary ops, and binary ops between two
    AccumulatedDerivatives or against a plain scalar.

    Expected derivatives follow the sum/product/quotient/power rules applied to
    the single stored derivative entry ("fp value") and the function value
    f_value.

    NOTE(review): the scalar-case assertions below were previously written as
    `assert a == b if cond else c`, which Python parses as
    `assert ((a == b) if cond else c)` — whenever `cond` was False the assert
    only checked the truthiness of `c`, so the expected value was never
    compared. The ternaries are now parenthesized, and the expected signs for
    __sub__/__rsub__ with the scalar on the left (masked by that bug) are
    corrected: d/dx (3 - t2) = -fp(t2), not +fp(t2).
    """
    # Test unary operations
    arg_singles = [AccumulatedDerivative(), AccumulatedDerivative()(-1), AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)]
    for t1 in arg_singles:
        for op in ["__neg__", "__pos__"]:
            assert type(apply_op(op, t1)) == AccumulatedDerivative
            assert np.all(np.fromiter(apply_op(op, t1).elements.values(), dtype=float) ==
                          np.array([apply_op(op, v) for v in t1.elements.values()]))

    # TODO: Only test against scalars as of now. Will need to revisit this to test against vectors
    #   e.g. IgnoreVector, once hetinput/hetoutput functionality is enhanced
    # Test binary operations
    arg_pairs = [(AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.), 3),
                 (AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.), IgnoreFloat(3)),
                 (3, AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)),
                 (IgnoreFloat(3), AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)),
                 (AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.),
                  AccumulatedDerivative(elements={(1, 1): 4.}, f_value=5.)),
                 # TODO: Implement test for elements not in the same (i, m)
                 # (AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.),
                 #  AccumulatedDerivative(elements={(1, 0): 4.}, f_value=5.)),
                 (AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)(-1), 3),
                 (AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)(-1), IgnoreFloat(3)),
                 (3, AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)(-1)),
                 (IgnoreFloat(3), AccumulatedDerivative(elements={(1, 1): 2.}, f_value=2.)(-1))]

    def get_fp_value(x):
        return x._fp_values[0]

    for pair in arg_pairs:
        t1, t2 = pair
        for op in ["__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__",
                   "__truediv__", "__rtruediv__", "__pow__", "__rpow__"]:
            assert type(apply_op(op, t1, t2)) == AccumulatedDerivative
            result = get_fp_value(apply_op(op, t1, t2))
            if isinstance(t1, AccumulatedDerivative) and isinstance(t2, AccumulatedDerivative):
                assert apply_op(op, t1, t2).f_value == apply_op(op, t1.f_value, t2.f_value)
                if op in ["__add__", "__radd__", "__sub__", "__rsub__"]:
                    assert result == apply_op(op, get_fp_value(t1), get_fp_value(t2))
                elif op in ["__mul__", "__rmul__"]:
                    assert result == get_fp_value(t1) * t2.f_value + t1.f_value * get_fp_value(t2)
                elif op == "__truediv__":
                    assert result == (t2.f_value * get_fp_value(t1) - t1.f_value * get_fp_value(t2)) / t2.f_value ** 2
                elif op == "__rtruediv__":
                    assert result == (t1.f_value * get_fp_value(t2) - t2.f_value * get_fp_value(t1)) / t1.f_value ** 2
                elif op == "__pow__":
                    assert result == (t1.f_value ** (t2.f_value - 1)) * (t2.f_value * get_fp_value(t1) +
                                                                        t1.f_value * np.log(t1.f_value) * get_fp_value(t2))
                else:  # op == "__rpow__":
                    assert result == (t2.f_value ** (t1.f_value - 1)) * (t1.f_value * get_fp_value(t2) +
                                                                        t2.f_value * np.log(t2.f_value) * get_fp_value(t1))
            else:
                # exactly one of (t1, t2) is an AccumulatedDerivative; the other is a scalar
                assert apply_op(op, t1, t2).f_value == (apply_op(op, t1.f_value, numeric_primitive(t2))
                                                        if isinstance(t1, AccumulatedDerivative)
                                                        else apply_op(op, numeric_primitive(t1), t2.f_value))
                if op in ["__add__", "__radd__"]:
                    assert result == (get_fp_value(t1) if isinstance(t1, AccumulatedDerivative)
                                      else get_fp_value(t2))
                elif op == "__sub__":
                    # t1 - t2: derivative is +fp(t1) if t1 carries it, -fp(t2) if t2 does
                    assert result == (get_fp_value(t1) if isinstance(t1, AccumulatedDerivative)
                                      else -get_fp_value(t2))
                elif op == "__rsub__":
                    # t2 - t1: derivative is -fp(t1) if t1 carries it, +fp(t2) if t2 does
                    assert result == (-get_fp_value(t1) if isinstance(t1, AccumulatedDerivative)
                                      else get_fp_value(t2))
                elif op in ["__mul__", "__rmul__"]:
                    assert result == (numeric_primitive(t2) * get_fp_value(t1)
                                      if isinstance(t1, AccumulatedDerivative)
                                      else numeric_primitive(t1) * get_fp_value(t2))
                elif op == "__truediv__":
                    assert result == (get_fp_value(t1) / numeric_primitive(t2)
                                      if isinstance(t1, AccumulatedDerivative)
                                      else -numeric_primitive(t1) / t2.f_value ** 2 * get_fp_value(t2))
                elif op == "__rtruediv__":
                    assert result == (-numeric_primitive(t2) / t1.f_value ** 2 * get_fp_value(t1)
                                      if isinstance(t1, AccumulatedDerivative)
                                      else get_fp_value(t2) / numeric_primitive(t1))
                elif op == "__pow__":
                    assert result == (numeric_primitive(t2) * t1.f_value ** (numeric_primitive(t2) - 1) * get_fp_value(t1)
                                      if isinstance(t1, AccumulatedDerivative)
                                      else np.log(numeric_primitive(t1)) * numeric_primitive(t1) ** t2.f_value * get_fp_value(t2))
                else:  # op == "__rpow__"
                    assert result == (np.log(numeric_primitive(t2)) * numeric_primitive(t2) ** t1.f_value * get_fp_value(t1)
                                      if isinstance(t1, AccumulatedDerivative)
                                      else numeric_primitive(t1) * t2.f_value ** (numeric_primitive(t1) - 1) * get_fp_value(t2))
sequence-jacobian | sequence-jacobian-master/tests/base/test_multiexog.py | import numpy as np
import sequence_jacobian as sj
from sequence_jacobian import het, simple, combine
def household_init(a_grid, y, r, sigma):
    """Initial guess for the marginal value of assets Va on the (income, asset) grid.

    Consumption is guessed as income plus a floored return on assets (bounded
    away from zero), and Va follows from the envelope condition with CRRA utility.
    """
    rate = np.maximum(r, 0.04)  # floor the return so the guess stays sensible for low r
    cash_on_hand = y[..., np.newaxis] + rate * a_grid
    consumption = np.maximum(1e-8, cash_on_hand)
    Va = (1 + r) * (consumption ** (-sigma))
    return Va
def search_frictions(f, s):
    """Employment transition matrix from job-finding rate f and separation rate s.

    Row 0 (employed): stay with prob 1-s, separate with prob s.
    Row 1 (unemployed): find a job with prob f, stay unemployed with prob 1-f.
    """
    employed_row = [1 - s, s]
    unemployed_row = [f, 1 - f]
    Pi_e = np.vstack((employed_row, unemployed_row))
    return Pi_e
def labor_income(z, w, b):
    """Income by employment state: wage w*z when employed (row 0), benefit
    b*w*z when unemployed (row 1)."""
    employed_income = w * z
    unemployed_income = b * w * z
    return np.vstack((employed_income, unemployed_income))
@simple
def income_state_vars(rho_z, sd_z, nZ):
    """Discretize the AR(1) productivity process with Rouwenhorst's method."""
    z, _, Pi_z = sj.utilities.discretize.markov_rouwenhorst(rho=rho_z, sigma=sd_z, N=nZ)
    return z, Pi_z
@simple
def asset_state_vars(amin, amax, nA):
    """Build the asset grid on [amin, amax] with nA points."""
    a_grid = sj.utilities.discretize.agrid(amin=amin, amax=amax, n=nA)
    return a_grid
@het(exogenous=['Pi_e', 'Pi_z'], policy='a', backward='Va', backward_init=household_init)
def household_multidim(Va_p, a_grid, y, r, beta, sigma):
    """One-asset EGM backward step with two separate exogenous Markov states."""
    c_nextgrid = (beta * Va_p) ** (-1 / sigma)  # consumption on tomorrow's asset grid (Euler FOC)
    coh = (1 + r) * a_grid + y[..., np.newaxis]  # cash on hand
    a = sj.utilities.interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid)  # (x, xq, y)
    a = np.maximum(a, a_grid[0])  # enforce the borrowing constraint
    c = coh - a
    uc = c ** (-sigma)
    Va = (1 + r) * uc  # envelope condition
    return Va, a, c
@het(exogenous='Pi', policy='a', backward='Va', backward_init=household_init)
def household_onedim(Va_p, a_grid, y, r, beta, sigma):
    """Same EGM backward step on a single (Kronecker-product) exogenous state."""
    c_nextgrid = (beta * Va_p) ** (-1 / sigma)  # consumption on tomorrow's asset grid (Euler FOC)
    coh = (1 + r) * a_grid[np.newaxis, :] + y[:, np.newaxis]  # cash on hand
    a = sj.utilities.interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid)  # (x, xq, y)
    sj.utilities.optimized_routines.setmin(a, a_grid[0])  # in-place borrowing constraint
    c = coh - a
    uc = c ** (-sigma)
    Va = (1 + r) * uc  # envelope condition
    return Va, a, c
def test_equivalence():
    """A household with two independent exogenous Markov states is equivalent
    to a one-dimensional household on the Kronecker-product state space:
    same aggregates, same distribution (up to reshape), same Jacobians."""
    calibration = dict(beta=0.95, r=0.01, sigma=2, a_grid = sj.utilities.discretize.agrid(1000, 50))
    e1, _, Pi1 = sj.utilities.discretize.markov_rouwenhorst(rho=0.7, sigma=0.7, N=3)
    e2, _, Pi2 = sj.utilities.discretize.markov_rouwenhorst(rho=0.3, sigma=0.5, N=3)
    e_multidim = np.outer(e1, e2)
    e_onedim = np.kron(e1, e2)
    Pi = np.kron(Pi1, Pi2)
    ss_multidim = household_multidim.steady_state({**calibration, 'y': e_multidim, 'Pi_e': Pi1, 'Pi_z': Pi2})
    ss_onedim = household_onedim.steady_state({**calibration, 'y': e_onedim, 'Pi': Pi})
    assert np.isclose(ss_multidim['A'], ss_onedim['A']) and np.isclose(ss_multidim['C'], ss_onedim['C'])
    D_onedim = ss_onedim.internals['household_onedim']['D']
    D_multidim = ss_multidim.internals['household_multidim']['D']
    assert np.allclose(D_onedim, D_multidim.reshape(*D_onedim.shape))
    J_multidim = household_multidim.jacobian(ss_multidim, inputs = ['r'], outputs=['A'], T=10)
    J_onedim = household_onedim.jacobian(ss_onedim, inputs = ['r'], outputs=['A'], T=10)
    assert np.allclose(J_multidim['A','r'], J_onedim['A','r'])
def test_pishock():
    """Shocks to the transition-matrix inputs (f, s) propagate with the expected
    signs, and a two-sided nonlinear finite difference in f matches the Jacobian."""
    calibration = dict(beta=0.95, r=0.01, sigma=2., f=0.4, s=0.1, w=1., b=0.5,
                       rho_z=0.9, sd_z=0.5, nZ=3, amin=0., amax=1000, nA=50)
    household = household_multidim.add_hetinputs([search_frictions, labor_income])
    hh = combine([household, income_state_vars, asset_state_vars])
    ss = hh.steady_state(calibration)
    J = hh.jacobian(ss, inputs=['f', 's', 'r'], outputs=['C'], T=10)
    assert np.max(np.triu(J['C']['r'], 1)) <= 0  # low C before hike in r
    assert np.min(np.tril(J['C']['r'])) >= 0  # high C after hike in r
    assert np.all(J['C']['f'] > 0)  # high f increases C everywhere
    assert np.all(J['C']['s'] < 0)  # high s decreases C everywhere
    shock = 0.8**np.arange(10)
    C_up = hh.impulse_nonlinear(ss, {'f': 1E-4*shock})['C']
    C_dn = hh.impulse_nonlinear(ss, {'f': -1E-4*shock})['C']
    dC = (C_up - C_dn)/2E-4  # two-sided finite difference
    assert np.allclose(dC, J['C', 'f'] @ shock, atol=2E-6)
| 3,779 | 35 | 109 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_workflow.py | import numpy as np
from sequence_jacobian import simple, solved, create_model, markov_rouwenhorst, agrid
from sequence_jacobian.classes.impulse_dict import ImpulseDict
from sequence_jacobian.hetblocks.hh_sim import hh
'''Part 1: Household block'''
def make_grids(rho_e, sd_e, nE, amin, amax, nA):
    """Hetinput: Rouwenhorst productivity grid/distribution/transition plus asset grid."""
    e_grid, e_dist, Pi = markov_rouwenhorst(rho=rho_e, sigma=sd_e, N=nE)
    a_grid = agrid(amin=amin, amax=amax, n=nA)
    return e_grid, e_dist, Pi, a_grid
def income(atw, N, e_grid, transfer):
    """After-tax labor income by productivity type, plus a lump-sum transfer."""
    labor_earnings = atw * N * e_grid
    y = labor_earnings + transfer
    return y
def get_mpcs(c, a, a_grid, r):
    """Approximate marginal propensities to consume via finite differences.

    Differentiates the consumption policy `c` with respect to post-return
    assets (1 + r) * a_grid: central differences in the interior of the asset
    grid, one-sided differences at the edges. Households whose asset choice is
    exactly at the bottom of the grid are borrowing-constrained and get MPC 1.
    """
    post_return = (1 + r) * a_grid
    mpc = np.empty_like(c)
    # one-sided differences at the edges of the asset grid
    mpc[:, 0] = (c[:, 1] - c[:, 0]) / (post_return[1] - post_return[0])
    mpc[:, -1] = (c[:, -1] - c[:, -2]) / (post_return[-1] - post_return[-2])
    # central differences in the interior
    mpc[:, 1:-1] = (c[:, 2:] - c[:, :-2]) / (post_return[2:] - post_return[:-2])
    # constrained households consume the whole marginal unit
    mpc[a == a_grid[0]] = 1
    return mpc
def mpcs(c, a, a_grid, r):
    """Hetoutput: marginal propensities to consume (see get_mpcs)."""
    mpc = get_mpcs(c, a, a_grid, r)
    return mpc
def weighted_uc(c, e_grid, eis):
    """Hetoutput: marginal utility of consumption weighted by productivity
    (aggregated into UCE, used by the union block's wage NKPC)."""
    marginal_utility = c ** (-1 / eis)
    uce = marginal_utility * e_grid[:, np.newaxis]
    return uce
'''Part 2: rest of the model'''
@solved(unknowns={'C': 1.0, 'A': 1.0}, targets=['euler', 'budget_constraint'], solver='broyden_custom')
def household_ra(C, A, r, atw, N, transfer, beta, eis):
    """Representative-agent household: the Euler equation and budget constraint
    jointly determine (C, A); UCE is the marginal utility of consumption."""
    euler = beta * (1 + r(1)) * C(1) ** (-1 / eis) - C ** (-1 / eis)
    budget_constraint = (1 + r) * A(-1) + atw * N + transfer - C - A
    UCE = C ** (-1 / eis)
    return euler, budget_constraint, UCE
@simple
def firm(N, Z):
    """Linear production: output Y = Z*N, real wage equal to productivity."""
    Y = Z * N
    w = Z
    return Y, w
@simple
def union(UCE, tau, w, N, pi, muw, kappaw, nu, vphi, beta):
    """Wage New Keynesian Phillips curve residual from union wage setting."""
    wnkpc = kappaw * N * (vphi * N ** nu - (1 - tau) * w * UCE / muw) + \
        beta * (1 + pi(+1)).apply(np.log) - (1 + pi).apply(np.log)
    return wnkpc
@solved(unknowns={'B': (0.0, 10.0)}, targets=['B_rule'], solver='brentq')
def fiscal(B, G, r, w, N, transfer, rho_B):
    """Fiscal block: debt follows a mean-reverting rule; the labor tax adjusts
    to finance debt service, spending and transfers."""
    B_rule = B.ss + rho_B * (B(-1) - B.ss + G - G.ss) - B
    rev = (1 + r) * B(-1) + G + transfer - B  # revenue to be raised
    tau = rev / (w * N)
    atw = (1 - tau) * w  # after-tax wage
    return B_rule, rev, tau, atw
# Use this to test zero impulse once we have it
# @simple
# def real_bonds(r):
# rb = r
# return rb
@simple
def mkt_clearing(A, B, C, G, Y):
    """Asset-market and goods-market clearing residuals."""
    asset_mkt = A - B
    goods_mkt = C + G - Y
    return asset_mkt, goods_mkt
'''Part 3: Helper blocks'''
@simple
def household_ra_ss(r, B, tau, w, N, transfer, eis):
    """Helper: RA household steady state implied by r and asset-market clearing (A = B)."""
    beta = 1 / (1 + r)  # steady-state Euler equation
    A = B
    C = r * A + (1 - tau) * w * N + transfer  # steady-state budget constraint
    UCE = C ** (-1 / eis)
    return beta, A, C, UCE
@simple
def union_ss(atw, UCE, muw, N, nu, kappaw, beta, pi):
    """Helper: back out labor disutility vphi so the wage NKPC holds in steady state."""
    vphi = atw * UCE / (muw * N ** nu)
    wnkpc = kappaw * N * (vphi * N ** nu - atw * UCE / muw) + \
        beta * (1 + pi(+1)).apply(np.log) - (1 + pi).apply(np.log)
    return wnkpc, vphi
'''Tests'''
def test_all():
    """End-to-end workflow: build HA and RA versions of the same DAG, solve
    steady states via helper blocks, check linear impulses from the Jacobian
    against the direct linear solver, and check that nonlinear impulses satisfy
    goods-market clearing and the distributional aggregation identity."""
    # Assemble HA block (want to test nesting)
    household_ha = hh.add_hetinputs([make_grids, income])
    household_ha = household_ha.add_hetoutputs([mpcs, weighted_uc]).rename('household_ha')
    # Assemble DAG (for transition dynamics)
    dag = {}
    common_blocks = [firm, union, fiscal, mkt_clearing]
    dag['ha'] = create_model([household_ha] + common_blocks, name='HANK')
    dag['ra'] = create_model([household_ra] + common_blocks, name='RANK')
    unknowns = ['N', 'pi']
    targets = ['asset_mkt', 'wnkpc']
    # Solve steady state
    calibration = {'N': 1.0, 'Z': 1.0, 'r': 0.005, 'pi': 0.0, 'eis': 0.5, 'nu': 0.5,
                   'rho_e': 0.91, 'sd_e': 0.92, 'nE': 3, 'amin': 0.0, 'amax': 200,
                   'nA': 100, 'kappaw': 0.1, 'muw': 1.2, 'transfer': 0.143, 'rho_B': 0.9}
    ss = {}
    # Constructing ss-dag manually works just fine
    dag_ss = {}
    dag_ss['ha'] = create_model([household_ha, union_ss, firm, fiscal, mkt_clearing])
    ss['ha'] = dag_ss['ha'].solve_steady_state(calibration, dissolve=['fiscal'], solver='hybr',
                                               unknowns={'beta': 0.96, 'B': 3.0, 'G': 0.2},
                                               targets={'asset_mkt': 0.0, 'MPC': 0.25, 'tau': 0.334})
    assert np.isclose(ss['ha']['goods_mkt'], 0.0)
    assert np.isclose(ss['ha']['asset_mkt'], 0.0)
    assert np.isclose(ss['ha']['wnkpc'], 0.0)
    # RA steady state reuses the HA calibration as inputs
    dag_ss['ra'] = create_model([household_ra_ss, union_ss, firm, fiscal, mkt_clearing])
    ss['ra'] = dag_ss['ra'].steady_state(ss['ha'], dissolve=['fiscal'])
    assert np.isclose(ss['ra']['goods_mkt'], 0.0)
    assert np.isclose(ss['ra']['asset_mkt'], 0.0)
    assert np.isclose(ss['ra']['wnkpc'], 0.0)
    # Precompute HA Jacobian
    Js = {'ra': {}, 'ha': {}}
    Js['ha']['household_ha'] = household_ha.jacobian(ss['ha'],
                                                     inputs=['N', 'atw', 'r', 'transfer'], outputs=['C', 'A', 'UCE'], T=300)
    # Linear impulse responses from Jacobian vs directly
    shock = ImpulseDict({'G': 0.9 ** np.arange(300)})
    G, td_lin1, td_lin2 = dict(), dict(), dict()
    for k in ['ra', 'ha']:
        G[k] = dag[k].solve_jacobian(ss[k], unknowns, targets, inputs=['G'], T=300, Js=Js[k])
        td_lin1[k] = G[k] @ shock
        td_lin2[k] = dag[k].solve_impulse_linear(ss[k], unknowns, targets, shock, Js=Js[k])
        assert all(np.allclose(td_lin1[k][i], td_lin2[k][i]) for i in td_lin1[k])
    # Nonlinear vs linear impulses (sneak in test of ss_initial here too)
    td_nonlin = dag['ha'].solve_impulse_nonlinear(ss['ha'], unknowns, targets, inputs=shock*1E-2,
                                                  Js=Js, internals=['household_ha'], ss_initial=ss['ha'])
    assert np.max(np.abs(td_nonlin['goods_mkt'])) < 1E-8
    # See if D change matches up with aggregate assets
    td_nonlin_lvl = td_nonlin + ss['ha']
    td_A = np.sum(td_nonlin_lvl.internals['household_ha']['a'] * td_nonlin_lvl.internals['household_ha']['D'], axis=(1, 2))
    assert np.allclose(td_A - ss['ha']['A'], td_nonlin['A'])
| 5,800 | 33.945783 | 123 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/__init__.py | """Tests for base-level functionality of the package""" | 55 | 55 | 55 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_options.py | import numpy as np
import pytest
from sequence_jacobian.examples import krusell_smith
def test_jacobian_h(krusell_smith_dag):
    """The finite-difference step `h` can be set per block (directly or via
    `options`), and smaller steps give Jacobians closer to the default."""
    _, ss, dag, *_ = krusell_smith_dag
    hh = dag['hh']
    lowacc = hh.jacobian(ss, inputs=['r'], outputs=['C'], T=10, h=0.05)
    midacc = hh.jacobian(ss, inputs=['r'], outputs=['C'], T=10, h=1E-3)
    usual = hh.jacobian(ss, inputs=['r'], outputs=['C'], T=10, h=1E-4)
    nooption = hh.jacobian(ss, inputs=['r'], outputs=['C'], T=10)
    # omitting h gives the same result as h=1E-4
    assert np.array_equal(usual['C','r'], nooption['C','r'])
    assert np.linalg.norm(usual['C','r'] - midacc['C','r']) < np.linalg.norm(usual['C','r'] - lowacc['C','r'])
    # passing h through per-block `options` is equivalent to passing it directly
    midacc_alt = hh.jacobian(ss, inputs=['r'], outputs=['C'], T=10, options={'hh': {'h': 1E-3}})
    assert np.array_equal(midacc['C', 'r'], midacc_alt['C', 'r'])
    # the same option also works when calling jacobian on the whole DAG
    lowacc = dag.jacobian(ss, inputs=['K'], outputs=['C'], T=10, options={'hh': {'h': 0.05}})
    midacc = dag.jacobian(ss, inputs=['K'], outputs=['C'], T=10, options={'hh': {'h': 1E-3}})
    usual = dag.jacobian(ss, inputs=['K'], outputs=['C'], T=10, options={'hh': {'h': 1E-4}})
    assert np.linalg.norm(usual['C','K'] - midacc['C','K']) < np.linalg.norm(usual['C','K'] - lowacc['C','K'])
def test_jacobian_steady_state(krusell_smith_dag):
    """`backward_maxit` can be set per block via `options`: too few iterations
    raise ValueError, and a higher cap leaves the converged steady state unchanged."""
    dag = krusell_smith_dag[2]
    calibration = {"eis": 1, "delta": 0.025, "alpha": 0.11, "rho": 0.966, "sigma": 0.5,
                   "L": 1.0, "nS": 2, "nA": 10, "amax": 200, "r": 0.01, 'beta': 0.96,
                   "Z": 0.85, "K": 3.}
    pytest.raises(ValueError, dag.steady_state, calibration, options={'hh': {'backward_maxit': 10}})
    ss1 = dag.steady_state(calibration)
    ss2 = dag.steady_state(calibration, options={'hh': {'backward_maxit': 100000}})
    assert ss1['A'] == ss2['A']
def test_steady_state_solution(krusell_smith_dag):
    """Steady-state solver options: looser tolerances give a less accurate root,
    and the Newton solver raises because this DAG lacks the inputs it needs.

    (Cleanup: the unused `ss3 = ...` binding inside `pytest.raises` was dropped —
    the call is only expected to raise, never to return.)
    """
    dag_ss, ss, *_ = krusell_smith_dag
    calibration = {'eis': 1.0, 'delta': 0.025, 'alpha': 0.11, 'rho': 0.966, 'sigma': 0.5,
                   'Y': 1.0, 'L': 1.0, 'nS': 2, 'nA': 10, 'amax': 200, 'r': 0.01}
    unknowns_ss = {'beta': (0.98 / 1.01, 0.999 / 1.01)}
    targets_ss = {'asset_mkt': 0.}
    # less accurate solution
    ss2 = dag_ss.solve_steady_state(calibration, unknowns_ss, targets_ss, solver="brentq",
                                    ttol=1E-2, ctol=1E-2)
    assert not np.isclose(ss['asset_mkt'], ss2['asset_mkt'])
    # different solution method (Newton needs other inputs)
    with pytest.raises(ValueError):
        dag_ss.solve_steady_state(calibration, unknowns_ss, targets_ss, solver="newton")
| 2,593 | 42.966102 | 110 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_jacobian_dict_block.py | """Test JacobianDictBlock functionality"""
import numpy as np
from sequence_jacobian import combine
from sequence_jacobian.examples import rbc
from sequence_jacobian.blocks.auxiliary_blocks.jacobiandict_block import JacobianDictBlock
from sequence_jacobian import SteadyStateDict
def test_jacobian_dict_block_impulses(rbc_dag):
    """A JacobianDictBlock must preserve both the entries of the wrapped
    JacobianDict and its action on impulse paths."""
    model, ss, unknowns, _, exogenous = rbc_dag
    horizon = 10
    jac = model.jacobian(ss, inputs=unknowns + exogenous, T=10)
    wrapped = JacobianDictBlock(jac)
    wrapped_Z = wrapped.jacobian(SteadyStateDict({}), ["Z"])
    # entry-by-entry agreement for the Z columns
    for out in wrapped_Z.outputs:
        assert np.all(wrapped[out].get("Z") == wrapped_Z[out].get("Z"))
    # applying both to the same decaying shock path gives identical impulses
    shock_path = 0.8 ** np.arange(horizon)
    impulse_full = wrapped @ {"Z": shock_path}
    impulse_Z = wrapped_Z @ {"Z": shock_path}
    for out in wrapped:
        assert np.all(impulse_full[out] == impulse_Z[out])
def test_jacobian_dict_block_combine(rbc_dag):
    """`combine` should promote a raw JacobianDict to a JacobianDictBlock."""
    _, ss, _, _, exogenous = rbc_dag
    firm_jac = rbc.firm.jacobian(ss, inputs=exogenous)
    combined = combine([rbc.household, firm_jac, rbc.mkt_clearing])
    # Using `combine` converts JacobianDicts to JacobianDictBlocks
    assert isinstance(combined.blocks[0], JacobianDictBlock)
| 1,204 | 29.125 | 90 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_solved_block.py | import numpy as np
from sequence_jacobian import simple, solved
from sequence_jacobian.classes.steady_state_dict import SteadyStateDict
from sequence_jacobian.classes.jacobian_dict import FactoredJacobianDict
@simple
def myblock(u, i):
    """Simple block with one output `res`; u(1) and i(1) are one-period leads.

    NOTE: the local/return name `res` is the block's output name (it is used
    as a target below), so it must not be renamed.
    """
    res = 0.5 * i(1) - u**2 - u(1)
    return res
@solved(unknowns={'u': (-10.0, 10.0)}, targets=['res'], solver='brentq')
def myblock_solved(u, i):
    """Same relation wrapped as a SolvedBlock: u is solved so that res = 0 given i."""
    res = 0.5 * i(1) - u**2 - u(1)
    return res
def test_solved_block():
    """The Jacobian of a SolvedBlock should match the one assembled from the
    underlying simple block via a pre-factored square Jacobian."""
    ss = SteadyStateDict({'u': 5, 'i': 10, 'res': 0.0})
    horizon = 20
    # route 1: differentiate the solved block from scratch
    direct = myblock_solved.jacobian(ss, inputs=['i'], T=horizon)
    # route 2: factor the square jac wrt the unknown, then compose with the
    # jac wrt the non-unknown input to get d(unknown)/d(input)
    jac_unknown = myblock.jacobian(ss, inputs=['u'], T=horizon)
    factored = FactoredJacobianDict(jac_unknown, T=horizon)
    jac_other = myblock.jacobian(ss, inputs=['i'], T=horizon)
    indirect = factored.compose(jac_other)
    assert np.allclose(direct['u']['i'], indirect['u']['i'])
| 1,148 | 34.90625 | 118 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_stage_block.py | import numpy as np
from sequence_jacobian.blocks.stage_block import StageBlock
from sequence_jacobian.hetblocks.hh_sim import hh, hh_init
from sequence_jacobian.blocks.support.stages import Continuous1D, ExogenousMaker
from sequence_jacobian import interpolate, grids, misc, combine
from sequence_jacobian.classes import ImpulseDict
def make_grids(rho_e, sd_e, nE, amin, amax, nA):
    """Hetinput: Rouwenhorst income-state Markov chain plus asset grid.

    NOTE: return-variable names become block output names (e.g. 'Pi_ss' is
    consumed by alter_Pi below), so they must not be renamed.
    """
    e_grid, e_dist, Pi_ss = grids.markov_rouwenhorst(rho=rho_e, sigma=sd_e, N=nE)
    a_grid = grids.agrid(amin=amin, amax=amax, n=nA)
    return e_grid, e_dist, Pi_ss, a_grid
def alter_Pi(Pi_ss, shift):
    """Hetinput: tilt the transition matrix, moving `shift` probability mass
    from the lowest to the highest income state. The returned 'Pi' feeds the
    ExogenousMaker stage."""
    Pi = Pi_ss.copy()
    Pi[:, 0] -= shift
    Pi[:, -1] += shift
    return Pi
def income(atw, N, e_grid, transfer):
    """Hetinput: after-tax labor income plus lump-sum transfer, by income state."""
    y = atw * N * e_grid + transfer
    return y
# copy original household hetblock but get rid of _p on Va
def household_new(Va, a_grid, y, r, beta, eis):
    """One EGM backward step for the consumption-savings stage.

    Same as the library's hh backward function except Va enters directly
    rather than as Va_p, since the StageBlock's exogenous stage applies the
    expectation separately.
    """
    uc_nextgrid = beta * Va
    c_nextgrid = uc_nextgrid ** (-eis)
    coh = (1 + r) * a_grid[np.newaxis, :] + y[:, np.newaxis]  # cash on hand
    # invert the budget constraint on the endogenous grid, then enforce the borrowing limit
    a = interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid)
    misc.setmin(a, a_grid[0])
    c = coh - a
    Va = (1 + r) * c ** (-1 / eis)  # envelope condition
    return Va, a, c
def marginal_utility(c, eis):
    """Hetoutput: CRRA marginal utility of consumption (aggregated as UC in tests)."""
    uc = c ** (-1 / eis)
    return uc
# Two-stage household block: an exogenous income-shock stage ('stage0') feeding
# the continuous savings-choice stage ('stage1') with a marginal-utility hetoutput.
#het_stage = Continuous1D(backward='Va', policy='a', f=household_new, name='stage1')
het_stage = Continuous1D(backward='Va', policy='a', f=household_new, name='stage1', hetoutputs=[marginal_utility])
hh2 = StageBlock([ExogenousMaker('Pi', 0, 'stage0'), het_stage], name='hh',
                 backward_init=hh_init, hetinputs=(make_grids, income, alter_Pi))
def test_equivalence():
    """hh (classic HetBlock) and hh2 (StageBlock) must agree on steady state,
    Jacobians, and linear/nonlinear impulse responses."""
    hh1 = hh.add_hetinputs([make_grids, income, alter_Pi]).add_hetoutputs([marginal_utility])
    calibration = {'r': 0.004, 'eis': 0.5, 'rho_e': 0.91, 'sd_e': 0.92, 'nE': 3,
                   'amin': 0.0, 'amax': 200, 'nA': 100, 'transfer': 0.143, 'N': 1,
                   'atw': 1, 'beta': 0.97, 'shift': 0}
    ss1 = hh1.steady_state(calibration)
    ss2 = hh2.steady_state(calibration)
    # test steady-state equivalence (aggregates and internal distributions/policies)
    assert np.isclose(ss1['A'], ss2['A'])
    assert np.isclose(ss1['C'], ss2['C'])
    assert np.allclose(ss1.internals['hh']['Dbeg'], ss2.internals['hh']['stage0']['D'])
    assert np.allclose(ss1.internals['hh']['a'], ss2.internals['hh']['stage1']['a'])
    assert np.allclose(ss1.internals['hh']['c'], ss2.internals['hh']['stage1']['c'])
    assert np.allclose(ss1.internals['hh']['Va'], ss2.internals['hh']['stage0']['Va'])
    # find Jacobians...
    inputs = ['r', 'atw', 'shift']
    outputs = ['A', 'C', 'UC']
    T = 200
    J1 = hh1.jacobian(ss1, inputs, outputs, T)
    J2 = hh2.jacobian(ss2, inputs, outputs, T)
    # test Jacobian equivalence
    for i in inputs:
        for o in outputs:
            if o == 'UC':
                # not sure why numerical differences somewhat larger here?
                assert np.max(np.abs(J1[o, i] - J2[o, i])) < 2E-4
            else:
                assert np.allclose(J1[o, i], J2[o, i])
    # impulse linear
    shock = ImpulseDict({'r': 0.5 ** np.arange(20)})
    td_lin1 = hh1.impulse_linear(ss1, shock, outputs=['C', 'UC'])
    td_lin2 = hh2.impulse_linear(ss2, shock, outputs=['C', 'UC'])
    assert np.allclose(td_lin1['C'], td_lin2['C'])
    assert np.max(np.abs(td_lin1['UC'] - td_lin2['UC'])) < 2E-4
    # impulse nonlinear (tiny shock so nonlinear ~ linear)
    td_nonlin1 = hh1.impulse_nonlinear(ss1, shock * 1E-4, outputs=['C', 'UC'])
    td_nonlin2 = hh2.impulse_nonlinear(ss2, shock * 1E-4, outputs=['C', 'UC'])
    assert np.allclose(td_nonlin1['C'], td_nonlin2['C'])
    assert np.allclose(td_nonlin1['UC'], td_nonlin2['UC'])
def test_remap():
    """Remapped/renamed copies (men/women) of the HetBlock and StageBlock,
    combined into one model, must agree with each other."""
    # hetblock
    hh1 = hh.add_hetinputs([make_grids, income, alter_Pi])
    hh1_men = hh1.remap({k: k + '_men' for k in hh1.outputs | ['sd_e']}).rename('men')
    hh1_women = hh1.remap({k: k + '_women' for k in hh1.outputs | ['sd_e']}).rename('women')
    hh1_all = combine([hh1_men, hh1_women])
    # stageblock
    hh2_men = hh2.remap({k: k + '_men' for k in hh2.outputs| ['sd_e']}).rename('men')
    hh2_women = hh2.remap({k: k + '_women' for k in hh2.outputs | ['sd_e']}).rename('women')
    hh2_all = combine([hh2_men, hh2_women])
    # steady state (only sd_e differs across the two groups)
    calibration = {'sd_e_men': 0.92, 'sd_e_women': 0.82,
                   'r': 0.004, 'eis': 0.5, 'rho_e': 0.91, 'nE': 3,
                   'amin': 0.0, 'amax': 200, 'nA': 100, 'transfer': 0.143, 'N': 1,
                   'atw': 1, 'beta': 0.97, 'shift': 0}
    ss1 = hh1_all.steady_state(calibration)
    ss2 = hh2_all.steady_state(calibration)
    # test steady-state equivalence
    assert np.isclose(ss1['A_men'], ss2['A_men'])
    assert np.isclose(ss1['C_women'], ss2['C_women'])
    # find Jacobians...
    inputs = ['r', 'atw', 'shift']
    outputs = ['A_men', 'A_women']
    T = 100
    J1 = hh1_all.jacobian(ss1, inputs, outputs, T)
    J2 = hh2_all.jacobian(ss2, inputs, outputs, T)
    # test Jacobian equivalence
    for i in inputs:
        for o in outputs:
            assert np.allclose(J1[o, i], J2[o, i])
    # impulse linear
    shock = ImpulseDict({'r': 0.5 ** np.arange(20)})
    td_lin1 = hh1_all.impulse_linear(ss1, shock, outputs=['C_men', 'C_women'])
    td_lin2 = hh2_all.impulse_linear(ss2, shock, outputs=['C_men', 'C_women'])
    assert np.allclose(td_lin1['C_women'], td_lin2['C_women'])
    # impulse nonlinear
    td_nonlin1 = hh1_all.impulse_nonlinear(ss1, shock * 1E-4, outputs=['C_men'])
    td_nonlin2 = hh2_all.impulse_nonlinear(ss2, shock * 1E-4, outputs=['C_men'])
    assert np.allclose(td_nonlin1['C_men'], td_nonlin2['C_men'])
| 5,577 | 39.715328 | 114 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_public_classes.py | """Test public-facing classes"""
import numpy as np
import pytest
from sequence_jacobian import het
from sequence_jacobian.classes.steady_state_dict import SteadyStateDict
from sequence_jacobian.classes.impulse_dict import ImpulseDict
from sequence_jacobian.utilities.bijection import Bijection
def test_impulsedict(krusell_smith_dag):
    """Exercise ImpulseDict: indexing, merging, and arithmetic with scalars
    and SteadyStateDicts."""
    _, ss, ks_model, unknowns, targets, _ = krusell_smith_dag
    horizon = 200
    # linearized impulse responses (as deviations) to a decaying Z shock
    irf = ks_model.solve_impulse_linear(ss, unknowns, targets,
                                        inputs={'Z': 0.01 * 0.5**np.arange(horizon)},
                                        outputs=['C', 'K', 'r'])
    # list-indexing returns an ImpulseDict, scalar-indexing a raw array
    assert isinstance(irf, ImpulseDict)
    assert isinstance(irf[['C']], ImpulseDict)
    assert isinstance(irf['C'], np.ndarray)
    # merging with | preserves key order
    merged = irf[['C', 'K']] | irf[['r']]
    assert list(merged.keys()) == ['C', 'K', 'r']
    # dividing by the SteadyStateDict matches dividing by the scalar ss value
    pct_direct = 100 * irf['C'] / ss['C']
    pct_dict = 100 * irf[['C']] / ss
    assert np.allclose(pct_direct, pct_dict['C'])
def test_bijection():
    """Exercise Bijection: inversion, duplicate-value rejection, composition
    with another Bijection, and remapping a SteadyStateDict."""
    fwd = Bijection({'a': 'a1', 'b': 'b1'})
    back = fwd.inv
    assert (fwd['a'], fwd['b']) == ('a1', 'b1')
    assert (back['a1'], back['b1']) == ('a', 'b')
    # two keys may not map to the same value
    with pytest.raises(ValueError):
        Bijection({'a': 'a1', 'b': 'a1'})
    # composition with another bijection flows right-to-left
    second = Bijection({'a1': 'a2'})
    assert (second @ fwd)['a'] == 'a2'
    # remapping a SteadyStateDict renames keys but keeps values
    ss = SteadyStateDict({'a': 2.0, 'b': 1.0})
    remapped = ss @ fwd
    assert isinstance(remapped, SteadyStateDict)
    assert remapped['a1'] == ss['a'] and remapped['b1'] == ss['b']
| 1,734 | 31.735849 | 130 | py |
sequence-jacobian | sequence-jacobian-master/tests/base/test_het_support.py | import numpy as np
from sequence_jacobian.blocks.support.het_support import (Transition,
PolicyLottery1D, PolicyLottery2D, Markov, CombinedTransition,
lottery_1d, lottery_2d)
from sequence_jacobian.utilities.multidim import batch_multiply_ith_dimension
def test_combined_markov():
    """CombinedTransition over two Markov dimensions must agree with the
    explicit Kronecker-product operator on the flattened distribution."""
    shape = (5, 6, 7)
    np.random.seed(12345)
    for _ in range(10):
        dist = np.random.rand(*shape)
        transitions = [np.random.rand(s, s) for s in shape[:2]]
        combined = CombinedTransition([Markov(Pi, dim) for dim, Pi in enumerate(transitions)])
        expect = combined.expectation(dist)
        forward = combined.forward(dist)
        # reference: act with kron(Pi0, Pi1) on the first two dims flattened
        flat = dist.reshape((-1, dist.shape[2]))
        big_Pi = np.kron(transitions[0], transitions[1])
        assert np.allclose(expect, (big_Pi @ flat).reshape(expect.shape))
        assert np.allclose(forward, (big_Pi.T @ flat).reshape(expect.shape))
def test_many_markov_shock():
    """forward_shock for shocked Markov matrices must match a central finite
    difference of the forward iteration."""
    shape = (5, 6, 7)
    np.random.seed(12345)
    for _ in range(10):
        D = np.random.rand(*shape)
        Pis = [np.random.rand(s, s) for s in shape[:2]]
        dPis = [np.random.rand(s, s) for s in shape[:2]]
        h = 1E-4  # finite-difference step
        Dout_up = CombinedTransition([Markov(Pi + h*dPi, i) for i, (Pi, dPi) in enumerate(zip(Pis, dPis))]).forward(D)
        Dout_dn = CombinedTransition([Markov(Pi - h*dPi, i) for i, (Pi, dPi) in enumerate(zip(Pis, dPis))]).forward(D)
        Dder = (Dout_up - Dout_dn) / (2*h)
        # analytical directional derivative via forward_shockable/forward_shock
        Dder2 = CombinedTransition([Markov(Pi, i) for i, Pi in enumerate(Pis)]).forward_shockable(D).forward_shock(dPis)
        assert np.allclose(Dder, Dder2)
def test_policy_shock():
    """forward_shock for a shocked 1D policy lottery must match a central
    finite difference of the forward iteration."""
    shape = (3, 4, 30)
    grid = np.geomspace(0.5, 10, shape[-1])
    np.random.seed(98765)
    # monotone policy lying inside the grid so the lottery is well-defined
    a = (np.full(shape[0], 0.01)[:, np.newaxis, np.newaxis]
         + np.linspace(0, 1, shape[1])[:, np.newaxis]
         + 0.001*grid**2 + 0.9*grid + 0.5)
    for _ in range(10):
        D = np.random.rand(*shape)
        da = np.random.rand(*shape)
        h = 1E-5  # finite-difference step
        Dout_up = lottery_1d(a + h*da, grid).forward(D)
        Dout_dn = lottery_1d(a - h*da, grid).forward(D)
        Dder = (Dout_up - Dout_dn) / (2*h)
        Dder2 = lottery_1d(a, grid).forward_shockable(D).forward_shock(da)
        assert np.allclose(Dder, Dder2, atol=1E-4)
def test_law_of_motion_shock():
    """Shock the policy AND both Markov matrices simultaneously; the combined
    forward_shock must match a central finite difference."""
    # shock everything in the law of motion, and see if it works!
    shape = (3, 4, 30)
    grid = np.geomspace(0.5, 10, shape[-1])
    np.random.seed(98765)
    a = (np.full(shape[0], 0.01)[:, np.newaxis, np.newaxis]
         + np.linspace(0, 1, shape[1])[:, np.newaxis]
         + 0.001*grid**2 + 0.9*grid + 0.5)
    for _ in range(10):
        D = np.random.rand(*shape)
        Pis = [np.random.rand(s, s) for s in shape[:2]]
        da = np.random.rand(*shape)
        dPis = [np.random.rand(s, s) for s in shape[:2]]
        h = 1E-5
        policy_up = lottery_1d(a + h*da, grid)
        policy_dn = lottery_1d(a - h*da, grid)
        markovs_up = [Markov(Pi + h*dPi, i) for i, (Pi, dPi) in enumerate(zip(Pis, dPis))]
        markovs_dn = [Markov(Pi - h*dPi, i) for i, (Pi, dPi) in enumerate(zip(Pis, dPis))]
        Dout_up = CombinedTransition([policy_up, *markovs_up]).forward(D)
        Dout_dn = CombinedTransition([policy_dn, *markovs_dn]).forward(D)
        Dder = (Dout_up - Dout_dn) / (2*h)
        # analytical derivative: shocks passed as [policy shock, *markov shocks]
        markovs = [Markov(Pi, i) for i, Pi, in enumerate(Pis)]
        Dder2 = CombinedTransition([lottery_1d(a, grid), *markovs]).forward_shockable(D).forward_shock([da, *dPis])
        assert np.allclose(Dder, Dder2, atol=1E-4)
def test_2d_policy_shock():
    """Same finite-difference check as above, but for a 2D policy lottery
    (two policies a, b on separate grids) combined with two Markov shocks."""
    shape = (3, 4, 20, 30)
    a_grid = np.geomspace(0.5, 10, shape[-2])
    b_grid = np.geomspace(0.2, 8, shape[-1])
    np.random.seed(98765)
    a = (0.001*a_grid**2 + 0.9*a_grid + 0.5)[:, np.newaxis]
    b = (-0.001*b_grid**2 + 0.9*b_grid + 0.5)
    a = np.broadcast_to(a, shape)
    b = np.broadcast_to(b, shape)
    for _ in range(10):
        D = np.random.rand(*shape)
        Pis = [np.random.rand(s, s) for s in shape[:2]]
        da = np.random.rand(*shape)
        db = np.random.rand(*shape)
        dPis = [np.random.rand(s, s) for s in shape[:2]]
        h = 1E-5
        policy_up = lottery_2d(a + h*da, b + h*db, a_grid, b_grid)
        policy_dn = lottery_2d(a - h*da, b - h*db, a_grid, b_grid)
        markovs_up = [Markov(Pi + h*dPi, i) for i, (Pi, dPi) in enumerate(zip(Pis, dPis))]
        markovs_dn = [Markov(Pi - h*dPi, i) for i, (Pi, dPi) in enumerate(zip(Pis, dPis))]
        Dout_up = CombinedTransition([policy_up, *markovs_up]).forward(D)
        Dout_dn = CombinedTransition([policy_dn, *markovs_dn]).forward(D)
        Dder = (Dout_up - Dout_dn) / (2*h)
        policy = lottery_2d(a, b, a_grid, b_grid)
        markovs = [Markov(Pi, i) for i, Pi, in enumerate(Pis)]
        # policy shock is itself a pair [da, db] for the 2D lottery
        Dder2 = CombinedTransition([policy, *markovs]).forward_shockable(D).forward_shock([[da, db], *dPis])
        assert np.allclose(Dder, Dder2, atol=1E-4)
def test_forward_expectations_symmetry():
    # given a random law of motion, should be identical to iterate forward on distribution,
    # then aggregate, or take expectations backward on outcome, then aggregate
    shape = (3, 4, 30)
    grid = np.geomspace(0.5, 10, shape[-1])
    np.random.seed(1423)
    a = (np.full(shape[0], 0.01)[:, np.newaxis, np.newaxis]
         + np.linspace(0, 1, shape[1])[:, np.newaxis]
         + 0.001*grid**2 + 0.9*grid + 0.5)
    for _ in range(10):
        D = np.random.rand(*shape)
        X = np.random.rand(*shape)
        Pis = [np.random.rand(s, s) for s in shape[:2]]
        markovs = [Markov(Pi, i) for i, Pi, in enumerate(Pis)]
        lom = CombinedTransition([lottery_1d(a, grid), *markovs])
        # route 1: push the distribution forward 30 periods, then aggregate X
        Dforward = D
        for _ in range(30):
            Dforward = lom.forward(Dforward)
        outcome = np.vdot(Dforward, X)
        # route 2: pull X backward 30 periods via expectations, then aggregate
        Xbackward = X
        for _ in range(30):
            Xbackward = lom.expectation(Xbackward)
        outcome2 = np.vdot(D, Xbackward)
        # forward and expectation are adjoint, so both routes agree
        assert np.isclose(outcome, outcome2)
def test_einsum():
    """batch_multiply_ith_dimension should reproduce the explicit einsum that
    contracts P against dimension 0 of D, batched over the remaining dims."""
    D = np.random.rand(2, 5, 10)
    P = np.random.rand(3, 2, 5, 10)
    Dnew = np.einsum('xij,dxij->dij', D, P)
    # spot-check one entry of the contraction
    assert Dnew[0, 1, 1] == np.sum(P[0, :, 1, 1] * D[:, 1, 1])
    # can I generalize this? reshape and then einsum
    Dnew2 = batch_multiply_ith_dimension(P, 0, D)
    assert (Dnew == Dnew2).all()
sequence-jacobian | sequence-jacobian-master/tests/base/test_combined_block.py | import numpy as np
import sequence_jacobian as sj
def test_jacobian_accumulation():
    """Regression test: solve_jacobian / solve_impulse_linear should work both
    with and without a SolvedBlock whose output is unused downstream."""
    # Define two blocks. Notice: Second one does not use output from the first!
    @sj.solved(unknowns={'p': (-10, 1000)}, targets=['valuation'] , solver="brentq")
    def equity(r1, p, Y):
        valuation = Y + p(+1) / (1 + r1) - p
        return valuation

    @sj.simple
    def mkt_clearing(r0, r1, A0, A1, Y, B):
        asset_mkt_0 = A0 + (r0 + Y - 0.5*r1) - B
        asset_mkt_1 = A1 + (r1 + Y - 0.5*r0) - B
        return asset_mkt_0, asset_mkt_1

    both_blocks = sj.create_model([equity, mkt_clearing])
    only_second = sj.create_model([mkt_clearing])

    calibration = {'B': 0, 'Y': 0, 'r0': 0.01/4, 'r1': 0.01/4, 'A0': 1, 'A1': 1}
    ss_both = both_blocks.steady_state(calibration)
    ss_second = only_second.steady_state(calibration)

    # Second block alone gives us Jacobian without issues.
    unknowns_td = ['Y', 'r1']
    targets_td = ['asset_mkt_0', 'asset_mkt_1']
    T = 300
    shock = {'r0': 0.95**np.arange(T)}
    irf = only_second.solve_impulse_linear(ss_second, unknowns_td, targets_td, shock)
    G = only_second.solve_jacobian(ss_second, unknowns_td, targets_td, ['r0'], T=T)

    # Both blocks give us trouble. Even though solve_impulse_linear runs through...
    unknowns_td = ['Y', 'r1']
    targets_td = ['asset_mkt_0', 'asset_mkt_1']
    T = 300
    shock = {'r0': 0.95**np.arange(T)}
    irf = both_blocks.solve_impulse_linear(ss_both, unknowns_td, targets_td, shock)
    G = both_blocks.solve_jacobian(ss_both, unknowns_td, targets_td, ['r0'], T=T)
sequence-jacobian | sequence-jacobian-master/tests/performance/__init__.py | """Tests to check the performance of the code""" | 48 | 48 | 48 | py |
sequence-jacobian | sequence-jacobian-master/tests/utils/test_function.py | from sequence_jacobian.utilities.ordered_set import OrderedSet
from sequence_jacobian.utilities.function import (DifferentiableExtendedFunction, ExtendedFunction,
CombinedExtendedFunction, metadata)
import numpy as np
def f1(a, b, c):
    """Two-output test function; metadata() reports outputs (k, l) parsed from
    the return statement, so the local names must not be renamed."""
    k = a + 1
    l = b - c
    return k, l
def f2(b):
    """Single-output test function; output name 'k' is parsed from the return."""
    k = b + 4
    return k
def test_metadata():
    """metadata() extracts (name, input set, output set) from a plain function."""
    assert metadata(f1) == ('f1', OrderedSet(['a', 'b', 'c']), OrderedSet(['k', 'l']))
    assert metadata(f2) == ('f2', OrderedSet(['b']), OrderedSet(['k']))
def test_extended_function():
    """ExtendedFunction maps a dict of inputs to a dict of named outputs,
    ignoring extra keys (f2 only consumes 'b')."""
    inputs = {'a': 1, 'b': 2, 'c': 3}
    assert ExtendedFunction(f1)(inputs) == {'k': 2, 'l': -1}
    assert ExtendedFunction(f2)(inputs) == {'k': 6}
def f3(a, b):
    """Two-output test function used for differentiation tests; dc/da = b - 5,
    dd/da = 0 at the steady state below."""
    c = a*b - 5*a
    d = 3*b**2
    return c, d
def test_differentiable_extended_function():
    """diff() should apply the Jacobian at the steady state to input shocks."""
    extf3 = ExtendedFunction(f3)
    ss1 = {'a': 1, 'b': 2}
    inputs1 = {'a': 0.5}
    diff = extf3.differentiable(ss1).diff(inputs1)
    # dc = (b - 5) * da = -3 * 0.5; d does not depend on a
    assert np.isclose(diff['c'], -1.5)
    assert np.isclose(diff['d'], 0)
def f4(a, c, e):
    """Test function consuming f3's output c, to exercise chained differentiation."""
    f = a / c + a * e - c
    return f
def test_differentiable_combined_extended_function():
    """CombinedExtendedFunction chains f3 -> f4 and differentiates through the
    composition, optionally narrowing outputs or skipping unshocked functions."""
    # swapping in combined extended function to see if it works!
    fs = CombinedExtendedFunction([f3, f4])
    ss1 = {'a': 1, 'b': 2, 'e': 4}
    ss1.update(fs(ss1))
    inputs1 = {'a': 0.5, 'e': 1}
    diff = fs.differentiable(ss1).diff(inputs1)
    assert np.isclose(diff['c'], -1.5)
    assert np.isclose(diff['d'], 0)
    assert np.isclose(diff['f'], 4.5)
    # test narrowing down outputs
    diff = fs.differentiable(ss1).diff(inputs1, outputs=['c','d'])
    assert np.isclose(diff['c'], -1.5)
    assert np.isclose(diff['d'], 0)
    assert list(diff) == ['c', 'd']
    # if no shocks to first function, hide first function
    inputs2 = {'e': -2}
    diff = fs.differentiable(ss1).diff2(inputs2)
    assert list(diff) == ['f']
    assert np.isclose(diff['f'], -2)
    # if we ask for output from first function but no inputs shocked, shouldn't be there!
    diff = fs.differentiable(ss1).diff(inputs2, outputs=['c', 'f'])
    assert list(diff) == ['f']
    assert np.isclose(diff['f'], -2)
| 2,154 | 25.9375 | 100 | py |
sequence-jacobian | sequence-jacobian-master/tests/utils/test_ordered_set.py | from sequence_jacobian.utilities.ordered_set import OrderedSet
def test_ordered_set():
    """OrderedSet: insertion-order semantics for construction and all set
    operations, including in-place variants and mixed-iterable operands."""
    # order matters
    assert OrderedSet([1,2,3]) != OrderedSet([3,2,1])
    # first insertion determines order
    assert OrderedSet([5,1,6,5]) == OrderedSet([5,1,6])
    # union preserves first and second order
    assert (OrderedSet([6,1,3]) | OrderedSet([3,1,7,9])) == OrderedSet([6,1,3,7,9])
    # intersection preserves first order
    assert (OrderedSet([6,1,3]) & OrderedSet([3,1,7])) == OrderedSet([1,3])
    # difference works
    assert (OrderedSet([6,1,3,2]) - OrderedSet([3,1,7])) == OrderedSet([6,2])
    # symmetric difference: first then second
    assert (OrderedSet([6,1,3,8]) ^ OrderedSet([3,1,7,9])) == OrderedSet([6,8,7,9])
    # in-place versions of these (s2 aliases s, so s must be mutated)
    s = OrderedSet([6,1,3])
    s2 = s
    s2 |= OrderedSet([3,1,7,9])
    assert s == OrderedSet([6,1,3,7,9])
    s = OrderedSet([6,1,3])
    s2 = s
    s2 &= OrderedSet([3,1,7])
    assert s == OrderedSet([1,3])
    s = OrderedSet([6,1,3,2])
    s2 = s
    s2 -= OrderedSet([3,1,7])
    assert s == OrderedSet([6,2])
    s = OrderedSet([6,1,3,8])
    s2 = s
    s2 ^= OrderedSet([3,1,7,9])
    assert s == OrderedSet([6,8,7,9])
    # comparisons (order not used for these)
    assert OrderedSet([4,3,2,1]) <= OrderedSet([1,2,3,4])
    assert not (OrderedSet([4,3,2,1]) < OrderedSet([1,2,3,4]))
    assert OrderedSet([3,2,1]) < OrderedSet([1,2,3,4])
    # allow second argument (but ONLY second argument) to be any iterable, not just ordered set
    # we use the order from the iterable...
    assert (OrderedSet([6,1,3]) | [3,1,7,9]) == OrderedSet([6,1,3,7,9])
    assert (OrderedSet([6,1,3]) & [3,1,7]) == OrderedSet([1,3])
    assert (OrderedSet([6,1,3,2]) - [3,1,7]) == OrderedSet([6,2])
    assert (OrderedSet([6,1,3,8]) ^ [3,1,7,9]) == OrderedSet([6,8,7,9])
def test_ordered_set_dict_from():
    # dict_from pairs the set's elements, in order, with the given values
    assert OrderedSet(['a','b','c']).dict_from([1, 2, 3]) == {'a': 1, 'b': 2, 'c': 3}
sequence-jacobian | sequence-jacobian-master/tests/utils/test_multidim.py | from sequence_jacobian.utilities.multidim import outer
import numpy as np
def test_2d():
    """outer([a, b]) must reproduce np.outer for two vectors."""
    u = np.random.rand(10)
    v = np.random.rand(12)
    assert np.allclose(outer([u, v]), np.outer(u, v))
def test_3d():
    """outer() with three vectors builds the rank-3 outer product
    expected[i, j, k] = a[i] * b[j] * c[k]."""
    a = np.array([1., 2])
    b = np.array([1., 7])
    small = np.outer(a, b)
    c = np.array([2., 4])
    # assemble the expected tensor slice by slice along the last axis
    product = np.empty((2,2,2))
    product[..., 0] = 2*small
    product[..., 1] = 4*small
    assert np.array_equal(product, outer([a,b,c]))
sequence-jacobian | sequence-jacobian-master/tests/utils/test_DAG.py | from sequence_jacobian.utilities.graph import DAG
from sequence_jacobian.utilities.ordered_set import OrderedSet
from sequence_jacobian import simple, combine
import pytest
class Block:
    """Minimal stand-in for a sequence-jacobian block: just the declared
    input and output name sets, which is all DAG construction reads."""
    def __init__(self, inputs, outputs):
        self.inputs = OrderedSet(inputs)
        self.outputs = OrderedSet(outputs)
# Three-block fixture DAG: (a,e)->b feeds (a,b,z)->(c,d), which feeds d->f.
# DAG() topologically sorts the blocks (expected order: 1, 0, 2).
test_dag = DAG([Block(inputs=['a', 'b', 'z'], outputs=['c', 'd']),
                Block(inputs=['a', 'e'], outputs=['b']),
                Block(inputs = ['d'], outputs=['f'])])
def test_dag_constructor():
    """DAG() should topologically sort blocks and build the input map, output
    map, and forward/reverse adjacency structures."""
    # the blocks should be ordered 1, 0, 2
    assert list(test_dag.blocks[0].inputs) == ['a', 'e']
    assert list(test_dag.blocks[1].inputs) == ['a', 'b', 'z']
    assert list(test_dag.blocks[2].inputs) == ['d']
    # inmap: input name -> indices of blocks consuming it (post-sort indices)
    assert set(test_dag.inmap['a']) == {0, 1}
    assert set(test_dag.inmap['b']) == {1}
    # outmap: output name -> index of the producing block
    assert test_dag.outmap['c'] == 1
    assert test_dag.outmap['f'] == 2
    assert test_dag.outmap['d'] == 1
    # adjacency (adj) and reverse adjacency (revadj) between block indices
    assert set(test_dag.adj[0]) == {1}
    assert set(test_dag.adj[1]) == {2}
    assert set(test_dag.revadj[2]) == {1}
    assert set(test_dag.revadj[1]) == {0}
def test_visited():
    """Reachability queries on the DAG: which blocks are needed to produce
    given outputs, and which blocks are affected by given inputs."""
    # bug fix: these comparisons previously lacked `assert`, so their results
    # were silently discarded and the test could never fail
    assert test_dag.visit_from_outputs(['f']) == OrderedSet([0, 1, 2])
    assert test_dag.visit_from_outputs(['b']) == OrderedSet([0])
    assert test_dag.visit_from_outputs(['d']) == OrderedSet([0, 1])
    assert test_dag.visit_from_inputs(['e']) == OrderedSet([0, 1, 2])
    assert test_dag.visit_from_inputs(['z']) == OrderedSet([1, 2])
def test_find_cycle():
    """combine() must detect the cyclic dependency f1 -> f4 -> f2 -> f1
    (x -> y -> z -> x) and name it in the error message."""
    @simple
    def f1(x):
        y = x
        return y

    @simple
    def f2(y, theta):
        z = y
        return z

    @simple
    def f3(x, z):
        w = x * z
        return w

    @simple
    def f4(z):
        x = z
        return x

    with pytest.raises(Exception) as exception:
        a = combine([f1, f2, f3, f4])
    assert "Topological sort failed: cyclic dependency f1 -> f4 -> f2 -> f1" in str(exception.value)
APIHarvest | APIHarvest-main/backend.py | import elasticsearch
import json
import requests
import csv
from flask import Flask, request, jsonify
from flask_cors import CORS
import traceback
app = Flask(__name__)
CORS(app)
es = elasticsearch.Elasticsearch(["http://localhost:9200"])
def es_create_index_if_not_exists(es, index):
    """Create the given ElasticSearch index and ignore error if it already exists"""
    try:
        # consistency/compat fix: pass the index name by keyword (the sibling
        # import scripts do this, and modern elasticsearch-py clients require it)
        es.indices.create(index=index)
    except elasticsearch.exceptions.RequestError as ex:
        if ex.error == 'resource_already_exists_exception':
            pass  # Index already exists. Ignore.
        else:  # Other exception - raise it
            raise ex
def filter_apis(name):
    """Search every data-source index for documents whose `name` field matches.

    Returns a dict mapping index name -> list of raw Elasticsearch hits.
    With a falsy `name`, the query has an empty `must` clause and matches all.
    """
    query = {
        "query": {
            "bool": {
                "must": []
            }
        }
    }
    if name:
        query["query"]["bool"]["must"].append({
            "match": {
                "name": name
            }
        })
    # previously this dict was built but never used, with the same literal
    # repeated inline at every call; now it is passed once per search
    headers = {
        "Content-Type": "application/json"
    }
    all_res = {}
    # "youtube" was disabled in the original (commented out) and stays off
    for index in ("stackoverflow", "github", "tweet", "cve"):
        res = es.search(index=index, body=query, headers=headers)
        all_res[index] = res["hits"]["hits"]
    return all_res
@app.route("/search", methods=["GET"])
def search():
name = request.args.get("name")
print("in search")
apis = filter_apis(name)
print(apis)
return jsonify({
"hits": {
"hits": apis
}
})
@app.route("/insert", methods=["POST"])
def insert():
api_data = request.json
res = es.index(index="api", body=movie_data)
return jsonify({
"result": "success",
"id": res["_id"]
})
@app.route("/filter", methods=["POST"])
def filter():
data = request.get_json()
name = data.get("name")
apis = filter_apis(name, actors, genre, date)
return jsonify(apis)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| 2,337 | 21.266667 | 84 | py |
APIHarvest | APIHarvest-main/scripts/import_arseek_so.py | import elasticsearch
import json
import requests
def es_create_index_if_not_exists(es, index):
    """Create the given ElasticSearch index and ignore error if it already exists"""
    try:
        es.indices.create(index=index)
    except elasticsearch.exceptions.RequestError as ex:
        # Elasticsearch signals a pre-existing index with this error code
        if ex.error == 'resource_already_exists_exception':
            pass  # Index already exists. Ignore.
        else:  # Other exception - raise it
            raise ex
def insert_SO(filename):
    """Index Stack Overflow threads into the "stackoverflow" index.

    `filename` is a JSON file mapping an API name to a list of thread records;
    each thread's body text is looked up in the module-level `thread_content`
    dict (populated from the text/code-pairs JSONL before this is called).
    Uses the module-level `es` client.
    """
    with open(filename, "r") as file:
        so_dict = json.load(file)
    for name in so_dict:
        for item in so_dict[name]:
            post = {
                "name": name,
                "title": item["title"],
                "link": item["link"],
                "content": thread_content[item["thread_id"]]
            }
            es.index(index="stackoverflow", body=post)
# Build the thread_id -> body-text lookup from the text/code-pairs dump.
thread_content = {}
with open('data/text_code_pairs_test.jsonl') as f:
    for line in f:
        data = json.loads(line)
        thread_content[data["thread_id"]] = data["pairs"][0]

es = elasticsearch.Elasticsearch(["http://localhost:9200"])
#es.indices.delete(index="stackoverflow")
es_create_index_if_not_exists(es,"stackoverflow")
insert_SO("data/threads_to_index.json")
APIHarvest | APIHarvest-main/scripts/import_haryono_cve.py | import elasticsearch
import json
import requests
import os
def es_create_index_if_not_exists(es, index):
    """Create the given ElasticSearch index and ignore error if it already exists"""
    try:
        es.indices.create(index=index)
    except elasticsearch.exceptions.RequestError as ex:
        # Elasticsearch signals a pre-existing index with this error code
        if ex.error == 'resource_already_exists_exception':
            pass  # Index already exists. Ignore.
        else:  # Other exception - raise it
            raise ex
def insert_cve():
    """Index each CVE description once per space-separated library label.

    Iterates the parallel module-level lists `cve_labels` / `cve_texts` and
    writes into the "cve" index via the module-level `es` client.
    """
    # idiom fix: zip the parallel lists instead of indexing by position; this
    # also avoids an IndexError if the labels file has more lines than texts
    for labels, text in zip(cve_labels, cve_texts):
        for cve_label in labels.split(" "):
            cve = {
                "name": cve_label,
                "content": text
            }
            es.index(index="cve", body=cve)
# Parallel line-aligned files: one row of labels per CVE description.
cve_texts = open("data/test_texts.txt").read().splitlines()
cve_labels = open("data/test_labels.txt").read().splitlines()

es = elasticsearch.Elasticsearch(["http://localhost:9200"])
# rebuild the index from scratch on every run
es.indices.delete(index="cve")
es_create_index_if_not_exists(es,"cve")
insert_cve()
APIHarvest | APIHarvest-main/scripts/import_ausearch_code.py | import elasticsearch
import json
import requests
def es_create_index_if_not_exists(es, index):
    """Create the given ElasticSearch index and ignore error if it already exists"""
    try:
        es.indices.create(index=index)
    except elasticsearch.exceptions.RequestError as ex:
        # Elasticsearch signals a pre-existing index with this error code
        if ex.error == 'resource_already_exists_exception':
            pass  # Index already exists. Ignore.
        else:  # Other exception - raise it
            raise ex
def insert_code():
    """Index each (fully-qualified name -> code snippet) pair into "github".

    Reads the module-level `code_content` dict and `es` client.
    """
    # idiom fix: iterate key/value pairs directly instead of doing a dict
    # lookup per key
    for name, content in code_content.items():
        code = {
            "name": name,
            "content": content
        }
        es.index(index="github", body=code)
# Map each target fully-qualified name to its code snippet from the pairs dump.
code_content = {}
with open('data/text_code_pairs_test.jsonl') as f:
    for line in f:
        data = json.loads(line)
        code_content[data["target_fqn"]] = data["pairs"][1]

es = elasticsearch.Elasticsearch(["http://localhost:9200"])
#es.indices.delete(index="github")
es_create_index_if_not_exists(es,"github")
insert_code()
APIHarvest | APIHarvest-main/scripts/import_zhang_tweet.py | import elasticsearch
import json
import requests
import os
def es_create_index_if_not_exists(es, index):
    """Create the given ElasticSearch index and ignore error if it already exists"""
    try:
        es.indices.create(index=index)
    except elasticsearch.exceptions.RequestError as ex:
        # Elasticsearch signals a pre-existing index with this error code
        if ex.error == 'resource_already_exists_exception':
            pass  # Index already exists. Ignore.
        else:  # Other exception - raise it
            raise ex
def insert_tweet():
    """Index every library-related tweet under its library name into "tweet".

    Reads the module-level `tweets` dict (name -> list of texts) and `es` client.
    """
    # idiom fix: iterate key/value pairs directly instead of a dict lookup
    # on every outer iteration
    for name, texts in tweets.items():
        for text in texts:
            tweet = {
                "name": name,
                "content": text
            }
            es.index(index="tweet", body=tweet)
# Collect library-related tweets (rows labeled "Lib"), one data file per library;
# the library name is the filename stem.
tweets = {}
for fname in os.listdir('data/tweets'):
    # NOTE(review): eval() on file contents executes arbitrary code if the data
    # directory is untrusted; consider ast.literal_eval instead.
    data = eval(open('data/tweets/'+fname).read())
    name = fname.split(".")[0]
    tweets[name] = []
    for arr in data:
        if arr[1] == "Lib":
            tweets[name].append(arr[0])

es = elasticsearch.Elasticsearch(["http://localhost:9200"])
#es.indices.delete(index="tweet")
es_create_index_if_not_exists(es,"tweet")
insert_tweet()
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, Extension
import numpy
# Version number
version = '0.0.1'
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Resource fix: use a context manager so the file handle is closed
    promptly (the original relied on the garbage collector).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# cos_module_np = Extension('cos_module_np',
# sources=['PcgComp/kernels/cos_module_np.c'],
# include_dirs=[numpy.get_include()])
# Package metadata for distribution; the author field is read from AUTHORS.txt.
setup(name = 'PcgComp',
      version = version,
      author = read('AUTHORS.txt'),
      author_email = "kurt.cutajar@eurecom.fr",
      description = ("Comparison of Preconditioning Techniques for Kernel Matrices"),
      license = "BSD 3-clause",
      keywords = "machine-learning gaussian-processes kernels preconditioning",
      packages = ["PcgComp.methods",
                  "PcgComp.kernels",
                  "PcgComp.preconditioners",
                  "PcgComp.util"],
      package_dir={'PcgComp': 'PcgComp'},
      py_modules = ['PcgComp.__init__'],
      install_requires=['numpy>=1.7', 'scipy>=0.12'],
      extras_require = {'docs':['matplotlib >=1.3','Sphinx','IPython']},
      classifiers=['License :: OSI Approved :: BSD License',
                   'Natural Language :: English',
                   'Operating System :: MacOS :: MacOS X',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX :: Linux',
                   'Programming Language :: Python :: 2.7',
                   'Topic :: Scientific/Engineering :: Artificial Intelligence'] )
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/experiment_powerplant.py | import sys
import numpy as np
import random as ran
import PcgComp
import random as ran
import time
def standardizeData(array):
    """Return a copy of `array` with each column standardized to zero mean and
    unit (population, ddof=0) standard deviation.

    The input is left untouched, and the copy keeps the input's dtype, matching
    the original per-column assignment semantics. A constant column yields a
    0/0 division, as before.
    """
    arr = array.copy()
    # vectorized over all columns at once (replaces the Python loop over
    # columns, and the Python-2-only xrange)
    arr[:] = (arr - arr.mean(axis=0)) / arr.std(axis=0)
    return arr
def normalizeColumns(array):
    """Return a copy of `array` with each column min-max scaled to [0, 1].

    The input is left untouched, and the copy keeps the input's dtype, matching
    the original per-column assignment semantics. A constant column yields a
    0/0 division, as before.
    """
    arr = array.copy()
    lo = arr.min(axis=0)
    hi = arr.max(axis=0)
    # vectorized over all columns at once (replaces the Python loop over
    # columns, and the Python-2-only xrange)
    arr[:] = (arr - lo) / (hi - lo)
    return arr
# CLI: experiment.py <preconditioner> <variance> <lengthscale> <noise>
args = sys.argv

# <------ select dataset for regression ------>
data = np.loadtxt('PowerPlant_Data.csv',delimiter=',')
data = standardizeData(data)
X = data[:,:4]
Y = data[:,4][:,None]

seed = 48
np.random.seed(seed)

N = np.shape(X)[0]
var = float(args[2])
ls = float(args[3])
noise = float(args[4])
th = 1e-10  # convergence threshold for (P)CG

#file = open("resultsDirect.txt", "w")

# <------ randomly sample + optimize sub-inputs ------>
M = int(np.sqrt(N))
ipHelper = PcgComp.util.InducingPointsHelper(seed)
XmRandom = ipHelper.get_random_inducing_points(X,M)

# <------ randomly sample + optimize sub-inputs ------>
kern = PcgComp.kernels.RBF(ls, var, noise)
K = kern.K(X,X) + kern.noise*np.identity(N)

# baseline: unpreconditioned CG iteration count
cg = PcgComp.methods.Cg(K,Y,threshold=th)
cgIterations = int(cg.iterations)

# preconditioned run: choose the preconditioner from argv[1]
if (args[1] == 'kron'):
    P2 = PcgComp.preconditioners.Kiss(X, kern)
    pcg = PcgComp.methods.KronTruncatedFlexiblePcg(K, Y, P2.precon, P2.W, P2.Ku, kern, threshold=1e-10, innerThreshold=1e-10)
    pcgIterations = int(pcg.outer_iterations)
else:
    if (args[1] == 'block'):
        precon = PcgComp.preconditioners.BlockJacobi(X, kern, M)
    elif (args[1] == 'svd'):
        precon = PcgComp.preconditioners.SVD(X, kern, M)
    elif (args[1] == 'pitc'):
        precon = PcgComp.preconditioners.PITC(X, kern, XmRandom)
    elif (args[1] == 'fitc'):
        precon = PcgComp.preconditioners.FITC(X, kern, XmRandom)
    elif (args[1] == 'spectral'):
        precon = PcgComp.preconditioners.Spectral(X, Y, kern, M)
    else:
        # default to FITC for unrecognized names
        precon = PcgComp.preconditioners.FITC(X, kern, XmRandom)
    P = precon.precon
    pcg = PcgComp.methods.RegularPcg(K, Y, P, threshold=th,preconInv=precon.get_inversion())
    pcgIterations = int(pcg.iterations)

# report raw counts and the log10 iteration ratio (negative = PCG wins)
print cgIterations
print pcgIterations
print np.log10(float(pcgIterations)/float(cgIterations))
| 2,263 | 24.727273 | 122 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/experiment_concrete.py | import sys
import numpy as np
import random as ran
import PcgComp
import random as ran
import time
def standardizeData(array):
	"""Return a copy of *array* with every column z-scored.

	Each column is shifted to zero mean and scaled to unit (population)
	standard deviation. The input array is not modified.

	:param array: 2-D numpy array (rows = samples, cols = features)
	:returns: standardized copy with the same shape
	"""
	arr = array.copy()
	rows, cols = arr.shape
	# range (not the Python 2-only xrange) keeps this working on Python 2 and 3.
	for col in range(cols):
		std = np.std(arr[:,col])   # population std (ddof=0), matching np.std default
		mean = np.mean(arr[:,col])
		arr[:,col] = (arr[:,col] - mean) / std
	return arr
def normalizeColumns(array):
	"""Return a copy of *array* with each column min-max scaled to [0, 1].

	The input array is not modified.

	:param array: 2-D numpy array (rows = samples, cols = features)
	:returns: rescaled copy with the same shape
	"""
	arr = array.copy()
	rows, cols = arr.shape
	# range (not the Python 2-only xrange) keeps this working on Python 2 and 3.
	for col in range(cols):
		maxim = arr[:,col].max()
		minim = arr[:,col].min()
		arr[:,col] = (arr[:,col] - minim) / (maxim - minim)
	return arr
# Experiment driver: compare plain CG against preconditioned CG on the
# Concrete regression dataset (8 features, 1 target column).
# Usage: python <script> <preconditioner> <variance> <lengthscale> <noise>
args = sys.argv
# <------ select dataset for regression ------>
data = np.loadtxt('Concrete_Data.csv',delimiter=',')
data = standardizeData(data)
X = data[:,:8]
Y = data[:,8][:,None]
seed = 48
np.random.seed(seed)
N = np.shape(X)[0]
var = float(args[2])   # kernel signal variance
ls = float(args[3])    # kernel lengthscale
noise = float(args[4]) # likelihood noise variance
th = 1e-10             # CG/PCG convergence threshold
#file = open("resultsDirect.txt", "w")
# <------ randomly sample + optimize sub-inputs ------>
M = int(np.sqrt(N))    # number of inducing points (sqrt-of-N heuristic)
ipHelper = PcgComp.util.InducingPointsHelper(seed)
XmRandom = ipHelper.get_random_inducing_points(X,M)
# <------ randomly sample + optimize sub-inputs ------>
kern = PcgComp.kernels.RBF(ls, var, noise)
K = kern.K(X,X) + kern.noise*np.identity(N)
# Baseline: unpreconditioned conjugate gradient on the same system.
cg = PcgComp.methods.Cg(K,Y,threshold=th)
cgIterations = int(cg.iterations)
if (args[1] == 'kron'):
	# The SKI/Kiss preconditioner needs the specialised Kronecker PCG solver.
	P2 = PcgComp.preconditioners.Kiss(X, kern)
	pcg = PcgComp.methods.KronTruncatedFlexiblePcg(K, Y, P2.precon, P2.W, P2.Ku, kern, threshold=1e-10, innerThreshold=1e-10)
	pcgIterations = int(pcg.outer_iterations)
else:
	# Every other preconditioner yields an explicit matrix usable by RegularPcg.
	if (args[1] == 'block'):
		precon = PcgComp.preconditioners.BlockJacobi(X, kern, M)
	elif (args[1] == 'svd'):
		precon = PcgComp.preconditioners.SVD(X, kern, M)
	elif (args[1] == 'pitc'):
		precon = PcgComp.preconditioners.PITC(X, kern, XmRandom)
	elif (args[1] == 'fitc'):
		precon = PcgComp.preconditioners.FITC(X, kern, XmRandom)
	elif (args[1] == 'spectral'):
		precon = PcgComp.preconditioners.Spectral(X, Y, kern, M)
	else:
		# Unrecognised preconditioner name: fall back to FITC.
		precon = PcgComp.preconditioners.FITC(X, kern, XmRandom)
	P = precon.precon
	pcg = PcgComp.methods.RegularPcg(K, Y, P, threshold=th,preconInv=precon.get_inversion())
	pcgIterations = int(pcg.iterations)
# Raw iteration counts plus log10 ratio (negative means PCG beat plain CG).
print cgIterations
print pcgIterations
print np.log10(float(pcgIterations)/float(cgIterations))
| 2,261 | 24.704545 | 122 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/experiment_protein.py | import sys
import numpy as np
import random as ran
import PcgComp
import random as ran
import time
def standardizeData(array):
	"""Return a copy of *array* whose columns are z-scored (zero mean, unit sd).

	Uses the population standard deviation (np.std default, ddof=0) and
	leaves the input untouched.

	:param array: 2-D numpy array (rows = samples, cols = features)
	:returns: standardized copy with the same shape
	"""
	arr = array.copy()
	rows, cols = arr.shape
	# range instead of the Python 2-only xrange: identical on Py2, valid on Py3.
	for col in range(cols):
		std = np.std(arr[:,col])
		mean = np.mean(arr[:,col])
		arr[:,col] = (arr[:,col] - mean) / std
	return arr
def normalizeColumns(array):
	"""Return a copy of *array* with every column min-max rescaled to [0, 1].

	The column minimum maps to 0 and the maximum to 1; the input array is
	left unmodified.

	:param array: 2-D numpy array (rows = samples, cols = features)
	:returns: rescaled copy with the same shape
	"""
	arr = array.copy()
	rows, cols = arr.shape
	# range instead of the Python 2-only xrange: identical on Py2, valid on Py3.
	for col in range(cols):
		maxim = arr[:,col].max()
		minim = arr[:,col].min()
		arr[:,col] = (arr[:,col] - minim) / (maxim - minim)
	return arr
# Experiment driver: compare plain CG against preconditioned CG on the
# Protein regression dataset (9 features, 1 target column).
# Usage: python <script> <preconditioner> <variance> <lengthscale> <noise>
args = sys.argv
# <------ select dataset for regression ------>
data = np.loadtxt('Protein_Data.csv',delimiter=',')
data = standardizeData(data)
X = data[:,:9]
Y = data[:,9][:,None]
seed = 48
np.random.seed(seed)
N = np.shape(X)[0]
var = float(args[2])   # kernel signal variance
ls = float(args[3])    # kernel lengthscale
noise = float(args[4]) # likelihood noise variance
th = 1e-10             # CG/PCG convergence threshold
#file = open("resultsDirect.txt", "w")
# <------ randomly sample + optimize sub-inputs ------>
M = int(np.sqrt(N))    # number of inducing points (sqrt-of-N heuristic)
ipHelper = PcgComp.util.InducingPointsHelper(seed)
XmRandom = ipHelper.get_random_inducing_points(X,M)
# <------ randomly sample + optimize sub-inputs ------>
kern = PcgComp.kernels.RBF(ls, var, noise)
K = kern.K(X,X) + kern.noise*np.identity(N)
# Baseline: unpreconditioned conjugate gradient on the same system.
cg = PcgComp.methods.Cg(K,Y,threshold=th)
cgIterations = int(cg.iterations)
if (args[1] == 'kron'):
	# The SKI/Kiss preconditioner needs the specialised Kronecker PCG solver.
	P2 = PcgComp.preconditioners.Kiss(X, kern)
	pcg = PcgComp.methods.KronTruncatedFlexiblePcg(K, Y, P2.precon, P2.W, P2.Ku, kern, threshold=1e-10, innerThreshold=1e-10)
	pcgIterations = int(pcg.outer_iterations)
else:
	# Every other preconditioner yields an explicit matrix usable by RegularPcg.
	if (args[1] == 'block'):
		precon = PcgComp.preconditioners.BlockJacobi(X, kern, M)
	elif (args[1] == 'svd'):
		precon = PcgComp.preconditioners.SVD(X, kern, M)
	elif (args[1] == 'pitc'):
		precon = PcgComp.preconditioners.PITC(X, kern, XmRandom)
	elif (args[1] == 'fitc'):
		precon = PcgComp.preconditioners.FITC(X, kern, XmRandom)
	elif (args[1] == 'spectral'):
		precon = PcgComp.preconditioners.Spectral(X, Y, kern, M)
	else:
		# Unrecognised preconditioner name: fall back to FITC.
		precon = PcgComp.preconditioners.FITC(X, kern, XmRandom)
	P = precon.precon
	pcg = PcgComp.methods.RegularPcg(K, Y, P, threshold=th,preconInv=precon.get_inversion())
	pcgIterations = int(pcg.iterations)
# Raw iteration counts plus log10 ratio (negative means PCG beat plain CG).
print cgIterations
print pcgIterations
print np.log10(float(pcgIterations)/float(cgIterations))
| 2,260 | 24.693182 | 122 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/__init__.py | import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import methods
import preconditioners
import kernels
def load(file_path):
	"""
	Load a previously pickled model, using `m.pickle('path/to/file.pickle')`

	:param file_path: path/to/file.pickle
	:returns: the unpickled object
	"""
	try:
		# Fast C pickler on Python 2. In the original code this import sat
		# OUTSIDE the try, so on Python 3 (where cPickle no longer exists)
		# the ImportError escaped and the pure-pickle fallback never ran.
		import cPickle as pickle
		with open(file_path, 'rb') as f:
			m = pickle.load(f)
	except Exception:
		# Fall back to the pure-Python pickle: the only module on Python 3,
		# and occasionally more tolerant than cPickle on Python 2.
		import pickle
		with open(file_path, 'rb') as f:
			m = pickle.load(f)
	return m
| 530 | 21.125 | 75 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/cg.py | import numpy as np
"""
Solve linear system using conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
init - Initial solution
	threshold - Termination criteria
"""
class Cg(object):
	"""Plain conjugate-gradient solver for the linear system K x = Y.

	The solve runs eagerly in the constructor; afterwards:
		result     -- the approximate solution x
		iterations -- number of CG steps taken before termination

	Termination: squared residual norm below threshold*N, or a hard cap
	of 15000 iterations.
	"""

	def __init__(self, K, Y, init=None, threshold=1e-9):
		n = np.shape(K)[0]
		if init is None:
			init = np.zeros((n, 1))
		self.K = K
		self.Y = Y.flatten()
		solution = init
		# Residual is the (negative) gradient of the quadratic objective.
		residual = Y - np.dot(K, solution)
		direction = residual
		steps = 0
		while True:
			Kd = np.dot(K, direction)  # single mat-vec reused below
			step = np.dot(residual.T, residual) / np.dot(direction.T, Kd)
			solution = solution + step * direction
			prev_residual = residual
			residual = residual - step * Kd
			if (np.dot(residual.T, residual).flatten() < (threshold * n)) or (steps > 15000):
				break
			beta = np.dot(residual.T, residual) / np.dot(prev_residual.T, prev_residual)
			direction = residual + beta * direction
			steps = steps + 1
		self.iterations = steps
		self.result = solution
| 772 | 17.853659 | 63 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/laplaceCg.py | import numpy as np
from scipy.stats import norm
from cg import Cg
import random
"""
Laplace approximation using conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
init - Initial solution
	threshold - Termination criteria for the algorithm
"""
class LaplaceCg(object):
	"""Laplace approximation for GP binary classification, with the inner
	Newton linear systems solved by plain conjugate gradient (Cg).

	After construction:
		result     -- the posterior mode f
		iterations -- Newton steps plus all inner CG iterations

	NOTE(review): assumes a probit likelihood — W and grad are built from
	norm.logpdf/logcdf; Y is expected in {-1, +1}. Confirm against callers.
	"""
	def __init__(self, K, Y, init=None, threshold=1e-9):
		# `init` is accepted for interface symmetry with the other solvers
		# but is not used: f always starts at zero.
		N = np.shape(K)[0]
		f = np.zeros((N,1))
		converged = False
		k = 0
		innerC = 0    # cumulative inner CG iteration count
		# Newton iterations are capped at N (xrange is Python 2-only).
		for i in xrange(N):
			# log phi(f) - log Phi(Y*f): shared factor of the probit moments.
			pdfDiff = norm.logpdf(f) - norm.logcdf(Y*f)
			W = np.exp(2*pdfDiff) + Y*f*np.exp(pdfDiff)
			Wsqrt = np.sqrt(W)
			Wdiag= np.diag(Wsqrt.flatten())
			# B = I + W^1/2 K W^1/2, the standard well-conditioned Newton matrix.
			B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
			grad = Y*np.exp(pdfDiff)
			b = W*f + grad
			interim = np.dot(Wdiag, np.dot(K, b))
			# Solve B s1 = W^1/2 K b by (unpreconditioned) CG.
			cgRes = Cg(B, interim, threshold=threshold)
			s1 = cgRes.result
			innerC = innerC + cgRes.iterations
			a = b - Wsqrt*s1
			# Convergence was detected on the PREVIOUS pass; one extra pass has
			# now refreshed a/f, so stop before updating f again.
			if(converged):
				break
			f_prev = f
			f = np.dot(K, a)
			diff = f - f_prev
			if (np.dot(diff.T,diff).flatten() < threshold*N or innerC>15000):
				converged = True
			k = k+1
		self.result = f
		self.iterations = k + innerC
| 1,118 | 20.519231 | 68 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/regularPcg.py | import numpy as np
"""
Solve linear system using regular preconditioned conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
	threshold - Termination criteria for outer loop
preconInv - Inversion of preconditioner Matrix
"""
class RegularPcg(object):
	"""Standard preconditioned conjugate gradient for K x = Y.

	The solve runs eagerly in the constructor; afterwards:
		result     -- the approximate solution x
		iterations -- number of PCG steps taken

	Either pass an explicit preconditioner matrix P (it is inverted with
	np.linalg.inv), or supply its inverse directly via `preconInv`.
	Termination: squared residual norm below threshold*N, or 10000 steps.
	"""

	def __init__(self, K, Y, P, init=None, threshold=1e-9, preconInv=None):
		n = np.shape(K)[0]
		if init is None:
			init = np.zeros((n, 1))
		if preconInv is None:
			preconInv = np.linalg.inv(P)
		self.K = K
		self.P = P
		self.Y = Y.flatten()
		solution = init
		residual = Y - np.dot(K, solution)  # gradient of the quadratic objective
		precond_residual = np.dot(preconInv, residual)
		direction = precond_residual
		n_iter = 0
		while True:
			Kd = np.dot(K, direction)  # single mat-vec reused below
			step = np.dot(residual.T, precond_residual) / np.dot(direction.T, Kd)
			solution = solution + step * direction
			prev_residual = residual
			residual = residual - step * Kd
			if np.dot(residual.T, residual).flatten() < threshold * n or n_iter > 10000:
				break
			prev_precond = precond_residual
			precond_residual = np.dot(preconInv, residual)
			beta = np.dot(precond_residual.T, residual) / np.dot(prev_precond.T, prev_residual)
			direction = precond_residual + beta * direction
			n_iter = n_iter + 1
		self.iterations = n_iter
		self.result = solution
| 1,109 | 20.764706 | 72 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/flexPcg.py | import numpy as np
from cg import Cg
"""
Solve linear system using flexible conjugate gradient (without truncation)
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
threshold - Termintion criteria for outer loop
innerThreshold - Termination criteria for inner loop
"""
class FlexiblePcg(object):
	"""Flexible (inexact-preconditioner) conjugate gradient without truncation.

	Each outer step applies the preconditioner by running an inner CG solve
	against P, then re-orthogonalises the new direction against the FULL
	history of previous directions (no truncation window).

	After construction:
		result     -- the approximate solution x
		iterations -- total inner CG iterations plus outer steps

	NOTE(review): the residual passed to Cg here is 1-D while Cg builds an
	(N,1) initial vector, so the inner solve relies on numpy broadcasting —
	confirm shapes against the callers before reusing this class.
	"""

	def __init__(self, K, Y, P, init=None, threshold=1e-9, innerThreshold=1e-9):
		n = np.shape(K)[0]
		if init is None:
			init = np.zeros(n)
		self.K = K
		self.P = P
		self.Y = Y.flatten()
		solution = init
		prev_resid = np.zeros(n)
		resid = self.Y - np.dot(self.K, solution)
		directions = np.zeros(6000, dtype=object)  # full history of search directions
		outer = 0
		inner_total = 0
		while True:
			delta = resid - prev_resid
			# Stop once the residual stops moving (note: threshold is NOT
			# scaled by N here, unlike the other solvers) or after 5000 steps.
			if (np.dot(delta.T, delta).flatten() < threshold) or outer > 5000:
				break
			inner_solve = Cg(P, resid, threshold=innerThreshold)
			z = inner_solve.result
			inner_total = inner_total + inner_solve.iterations
			if outer == 0:
				directions[outer] = z
			else:
				# Gram-Schmidt against every previous direction (K-inner product).
				projection = 0
				for i in range(outer):
					coeff = np.dot(z.T, np.dot(self.K, directions[i])) / np.dot(directions[i].T, np.dot(self.K, directions[i]))
					projection = projection + np.dot(coeff, directions[i])
				directions[outer] = z - projection
			d = directions[outer]
			alpha = np.dot(d.T, resid) / np.dot(d.T, np.dot(self.K, d))
			solution = solution + np.dot(alpha, d)
			prev_resid = resid
			resid = resid - np.dot(alpha, np.dot(K, d))
			outer = outer + 1
		self.result = solution
		self.iterations = inner_total + outer
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/kronCgDirect.py | import numpy as np
from ..util.kronHelper import KronHelper
from scipy import sparse
import time
"""
Solve linear system using conjugate gradient (intended for SKI inference)
Params:
K - Covariance Matrix
Ws - Sparse representation of weight matrix W
	WTs - Sparse representation of transposed weight matrix W
Ku - Array of dimension-specific kernels
Y - Target labels
noise - Variance of the likelihood
init - Initial solution
	threshold - Termination criteria for the algorithm
"""
class KronCgDirect(object):
	"""Conjugate gradient where every mat-vec uses the SKI structure
	W (Kron-product of Ku) W^T + noise*I instead of a dense matrix.

	After construction:
		result     -- the approximate solution
		iterations -- CG steps taken

	NOTE(review): the parameter K is never used in the body — the operator
	is rebuilt from Ws/WTs/Ku/noise on every product.
	"""
	def __init__(self, K, Ws, WTs, Ku, Y, noise, init=None, threshold=1e-9):
		N = len(Y)
		if init is None:
			init = np.zeros(N)
		self.Y = Y.flatten()
		x = init
		# Operator product: Ws * kron(Ku) * WTs * v + noise * v, done sparsely.
		prod = sparse.csr_matrix.dot(Ws, KronHelper().kron_mvprod(Ku, sparse.csr_matrix.dot(WTs, x))).flatten() + np.dot(noise*np.identity(N), x)
		r = self.Y - prod #initialise residual gradient
		p = r
		t = 1
		while True:
			# Same structured mat-vec applied to the search direction.
			prod = sparse.csr_matrix.dot(Ws, KronHelper().kron_mvprod(Ku, sparse.csr_matrix.dot(WTs, p))).flatten() + np.dot(noise*np.identity(N), p)
			alpha = np.dot(r.T, r) / np.dot(p.T, prod)
			x = x + np.dot(alpha, p)
			r_prev = r
			r = r - np.dot(alpha, prod)
			# Terminate on small squared residual (scaled by N) or iteration cap.
			if (np.dot(r.T,r).flatten() < threshold*N or t>15000):
				break
			beta = np.dot(r.T, r) / np.dot(r_prev.T, r_prev)
			p = r + np.dot(beta, p)
			t = t + 1
		self.iterations = t
		self.result = x
| 1,384 | 29.108696 | 140 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/kronTruncFlexPcg.py | import numpy as np
from cg import Cg
from scipy import sparse
from kronCgDirect import KronCgDirect
"""
Solve linear system using truncated flexible conjugate gradient (intended for SKI inference)
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioning matrix
W - Weight matrix W
Ku - Array of dimension-specific kernels
kern - Kernel class
init - Initial solution
	threshold - Termination criteria for the algorithm
innerThreshold - Termination criteria for inner loop
"""
class KronTruncatedFlexiblePcg(object):
	"""Truncated flexible PCG for SKI inference: the preconditioner solve is
	itself an inner CG over the Kronecker-structured operator (KronCgDirect),
	and new directions are orthogonalised only against a bounded window of
	previous ones.

	After construction:
		result           -- the approximate solution x
		outer_iterations -- outer flexible-PCG steps
		iterations       -- outer steps plus all inner CG iterations

	NOTE(review): kern defaults to None but kern.noise is dereferenced below,
	so kern is effectively required.
	"""
	def __init__(self, K, Y, P, W, Ku, kern=None, init=None, threshold=1e-9, innerThreshold=1e-9):
		mMax = 15   # maximum orthogonalisation window size
		N = np.shape(K)[0]
		if init is None:
			init = np.zeros(N)
		self.K = K
		self.P = P
		self.Y = Y.flatten()
		x = init
		r_prev = np.zeros(N)
		r = self.Y - np.dot(self.K, x)
		p = np.zeros(6000,dtype=object)   # history of search directions
		k = 0
		# Sparse W once up front; reused by every inner solve.
		Ws = sparse.csr_matrix(W)
		WTs = sparse.csr_matrix(W.T)
		innerC = 0
		while True:
			if (np.dot(r.T,r).flatten() < threshold*N or k>15000):
				break
			# Inexact preconditioner application via structured inner CG.
			interim = KronCgDirect(P, Ws, WTs, Ku, r, kern.noise, threshold=innerThreshold)
			z = interim.result
			count = interim.iterations
			innerC = innerC + count
			if (k == 0):
				p[k] = z
			else:
				# NOTE(review): the intended window is presumably min(k, mMax),
				# but k % (mMax+1) resets to 1 every mMax+1 iterations — confirm.
				m = max(1, k % (mMax+1))
				sum = 0
				# NOTE(review): `start` is computed but never used; the loop
				# below uses k-m directly (which can be 0 but never negative
				# given m <= k).
				if (k-m < 0):
					start = 0
				else:
					start = k - m
				# Orthogonalise z against the last m directions (K-inner product).
				for i in xrange((k-m), k):
					frac = np.dot(z.T,np.dot(self.K,p[i]))/np.dot(p[i].T, np.dot(self.K, p[i]))
					sum = sum + np.dot(frac, p[i])
				p[k] = z - sum
			alpha = np.dot(p[k].T, r) / np.dot(p[k].T, np.dot(self.K, p[k]))
			x = x + np.dot(alpha,p[k])
			r_prev = r
			r = r - np.dot(alpha, np.dot(K, p[k]))
			k = k + 1
		self.outer_iterations = k
		self.result = x
self.iterations = innerC + k | 2,176 | 28.418919 | 98 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/truncFlexPcg.py | import numpy as np
from cg import Cg
"""
Solve linear system using flexible conjugate gradient (with truncation)
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
	threshold - Termination criteria for outer loop
innerThreshold - Termination criteria for inner loop
"""
class TruncatedFlexiblePcg(object):
	"""Flexible PCG with truncated re-orthogonalisation: the preconditioner is
	applied inexactly by an inner CG solve against P, and each new direction
	is orthogonalised only against a bounded window of previous directions.

	After construction:
		result           -- the approximate solution x
		outer_iterations -- outer flexible-PCG steps
		iterations       -- outer steps plus all inner CG iterations
	"""
	def __init__(self, K, Y, P, init=None, threshold=1e-9, innerThreshold=1e-9):
		mMax = 15   # maximum orthogonalisation window size
		N = np.shape(K)[0]
		if init is None:
			init = np.zeros((N,1))
		self.K = K
		self.P = P
		self.Y = Y.flatten()
		x = init
		r_prev = np.zeros((N,1))
		r = Y - np.dot(self.K, x)
		p = np.zeros(6000,dtype=object)   # history of search directions
		k = 0
		innerC = 0
		while True:
			if (np.dot(r.T,r).flatten() < threshold*N or k>50000):
				break
			# Inexact preconditioner application: inner CG solve of P z = r.
			interim = Cg(P, r, threshold=innerThreshold)
			z = interim.result
			count = interim.iterations
			innerC = innerC + count
			if (k == 0):
				p[k] = z
			else:
				# NOTE(review): the intended window is presumably min(k, mMax),
				# but k % (mMax+1) resets to 1 every mMax+1 iterations — confirm.
				m = max(1, k % (mMax+1))
				sum = 0
				# NOTE(review): `start` is computed but never used; the loop
				# below uses k-m directly.
				if (k-m < 0):
					start = 0
				else:
					start = k - m
				# Orthogonalise z against the last m directions (K-inner product).
				for i in xrange((k-m), k):
					frac = np.dot(z.T,np.dot(self.K,p[i]))/np.dot(p[i].T, np.dot(self.K, p[i]))
					sum = sum + frac* p[i]
				p[k] = z - sum
			alpha = np.dot(p[k].T, r) / np.dot(p[k].T, np.dot(self.K, p[k]))
			x = x + alpha*p[k]
			r_prev = r
			r = r - alpha*np.dot(K, p[k])
			k = k + 1
		self.outer_iterations = k
		self.result = x
self.iterations = innerC + k | 1,869 | 27.333333 | 95 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/__init__.py | from cg import Cg
from regularPcg import RegularPcg
from flexPcg import FlexiblePcg
from truncFlexPcg import TruncatedFlexiblePcg
from kronCgDirect import KronCgDirect
from kronTruncFlexPcg import KronTruncatedFlexiblePcg
from laplaceCg import LaplaceCg
from laplacePcg import LaplacePcg | 287 | 35 | 53 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/methods/laplacePcg.py | import numpy as np
from scipy.stats import norm
from regularPcg import RegularPcg
import random
"""
Laplace approximation using preconditioned conjugate gradient
Params:
K - Covariance Matrix
Y - Target labels
P - Preconditioner Matrix (can be set to none)
init - Initial solution
	threshold - Termination criteria for the algorithm
"""
class LaplacePcg(object):
	"""Laplace approximation for GP binary classification with the inner
	Newton systems solved by preconditioned CG (RegularPcg).

	After construction:
		result     -- the posterior mode f
		iterations -- Newton steps plus all inner PCG iterations

	NOTE(review): despite its name, the third argument P is a preconditioner
	OBJECT exposing get_laplace_inversion(W, Wsqrt) (see e.g. SVD), not a
	matrix; the `precon` keyword parameter is never used.
	"""
	def __init__(self, K, Y, P, init=None, threshold=1e-9, precon=None):
		# `init` is accepted for interface symmetry but unused: f starts at 0.
		N = np.shape(K)[0]
		f = np.zeros((N,1))
		converged = False
		k = 0
		innerC = 0    # cumulative inner PCG iteration count
		# Newton iterations are capped at N (xrange is Python 2-only).
		for i in xrange(N):
			# Probit likelihood moments (assumes Y in {-1, +1} — confirm).
			pdfDiff = norm.logpdf(f) - norm.logcdf(Y*f)
			W = np.exp(2*pdfDiff) + Y*f*np.exp(pdfDiff)
			Wsqrt = np.sqrt(W)
			Wdiag= np.diag(Wsqrt.flatten())
			# B = I + W^1/2 K W^1/2, the standard well-conditioned Newton matrix.
			B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
			grad = Y*np.exp(pdfDiff)
			b = W*f + grad
			interim = np.dot(Wdiag, np.dot(K, b))
			# PCG with the preconditioner's Laplace-specific inverse; the
			# matrix argument of RegularPcg is None since preconInv is given.
			pcgRes = RegularPcg(B, interim, None, threshold=threshold, preconInv=P.get_laplace_inversion(W,Wsqrt))
			s1 = pcgRes.result
			innerC = innerC + pcgRes.iterations
			a = b - Wsqrt*s1
			# Convergence was flagged on the previous pass; one extra pass has
			# refreshed a/f, so stop before updating f again.
			if(converged):
				break
			f_prev = f
			f = np.dot(K, a)
			diff = f - f_prev
			if (np.dot(diff.T,diff).flatten() < threshold*N or innerC>15000):
				converged = True
			k = k+1
		self.result = f
self.iterations = k + innerC | 1,278 | 23.132075 | 105 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/util/inducingPointsHelper.py | #This implementation is based on the article:
# @inproceedings{snelson2005sparse,
# title={Sparse Gaussian processes using pseudo-inputs},
# author={Snelson, Edward and Ghahramani, Zoubin},
# booktitle={Advances in neural information processing systems},
# pages={1257--1264},
# year={2005}
# }
from __future__ import division
import numpy as np
from scipy.spatial.distance import cdist, squareform, pdist
from scipy.optimize import fmin_l_bfgs_b
from scipy.linalg import cholesky
import random as ran
import numpy.matlib,numpy.linalg,numpy.random
"""
Helper class for inducing point methods.
"""
class InducingPointsHelper(object):
	"""Selection and optimisation of inducing points for sparse GP
	approximations (SPGP / Snelson & Ghahramani 2005 port)."""

	def __init__(self, seed):
		# Seed the module-level `random` so get_random_inducing_points is
		# reproducible across runs.
		ran.seed(seed)
		self.name = "InducingPointsHelper"

	"""
	Returns a random selection of points from the given dataset
	X - Dataset
	M - Number of points to be selected
	"""
	def get_random_inducing_points(self, X, M):
		# Sample M distinct row indices, then index the rows.
		rand = ran.sample(range(0, X.shape[0]), M)
		return X[rand]

	"""
	Procedure for optimizing the given inducing points
	X - Dataset
	Y - Target labels
	M - Number of inducing points
	kern - Class of kernel function

	NOTE(review): this method drives like_spgp through L-BFGS, and like_spgp
	currently references an undefined name (`epc`, see below) — so calling
	this will raise NameError; the port appears unfinished.
	"""
	def optimize_inducing_points(self, X, Y, M, kern):
		dim = np.shape(X)[1]
		# Hyperparameter vector layout: [lengthscale per dim..., variance, noise].
		hyp_init = np.ones((dim+2, 1))
		for i in xrange(dim):
			hyp_init[i] = kern.lengthscale
		hyp_len = len(hyp_init)
		hyp_init[hyp_len - 2] = kern.variance
		hyp_init[hyp_len - 1] = kern.noise
		# Random initial inducing set, flattened column-major and stacked with
		# the hyperparameters into one optimisation vector W.
		rand = ran.sample(range(0, X.shape[0]), M)
		I = X[rand]
		W = np.vstack((np.reshape(I, (M*dim,1), order='F'), hyp_init))
		res = fmin_l_bfgs_b(self.like_spgp, W, iprint=False, args=(X,Y,M))[0]
		# Return only the (optimised) inducing-point block, reshaped to (M, dim).
		return np.reshape(res[0:M*dim], (M, dim), order='F')

	def dist(self, x1, x2):
		# Pairwise difference matrix: dist[i, j] = x1[i] - x2[j].
		x1 = np.reshape(x1,(-1,1))
		x2 = np.reshape(x2,(-1,1))
		n1 = len(x1)
		n2 = len(x2)
		return np.matlib.repmat(x1,1,n2) - np.matlib.repmat(x2.T,n1,1)

	"""
	Procedure for evaluating likelihood of the inducing point approximation
	W - Array of hyperparameters (incl. inducing points)
	x - Dataset
	y - Target labels
	M - Number of inducing points

	Returns (negative log marginal likelihood, gradient vector) for L-BFGS.
	This is a port of the MATLAB spgp_lik code from the paper cited above.

	NOTE(review): the derivative section has defects — `epc` (used in the dfc
	line) is never defined, so evaluation raises NameError; `dfb` is
	initialised to zeros but never filled, so the lengthscale gradients are
	always 0; and the `sum2` used in dfc is the loop-local value from the
	LAST dimension only. Compare against the original spgp_lik.m before use.
	"""
	def like_spgp(self, W, x, y, M):
		jitter = 1e-6
		N = np.shape(x)[0]
		dim = np.shape(x)[1]
		# Unpack W: inducing points, then log-lengthscales, log-variance, log-noise.
		length = len(W)
		pts = W[0:(length-2-dim)]
		xb = np.reshape(pts, (M, dim), order='F')
		b = np.exp(W[(length-2-dim):(length-2)])
		c = np.exp(W[length-2])
		sig = np.exp(W[length-1])
		# Scale inputs by sqrt(b) so the kernel becomes isotropic below.
		xb = xb * np.matlib.repmat(np.sqrt(b).T, M, 1)
		x = x * np.matlib.repmat(np.sqrt(b).T, N, 1)
		# Q = Kmm (inducing-inducing covariance), with jitter on the diagonal.
		Q = np.dot(xb, xb.T)
		diag = np.reshape(np.diag(Q), (-1,1))
		Q = np.matlib.repmat(diag,1,M) + np.matlib.repmat(diag.T, M, 1) - 2*Q
		Q = c*np.exp(-0.5*Q) + jitter*np.identity(M)
		# K = Kmn (inducing-training cross covariance).
		x_sum = np.reshape(np.sum(x*x, axis=1), (-1,1))
		xb_sum = np.reshape(np.sum(xb*xb, axis=1), (-1,1))
		K = -2*np.dot(xb, x.T) + np.matlib.repmat(x_sum.T,M,1) + np.matlib.repmat(xb_sum,1,N)
		K = c*np.exp(-0.5*K)
		L = cholesky(Q,lower=False).T
		V = np.linalg.solve(L,K)
		# ep: per-point FITC correction 1 + (Knn_diag - Qnn_diag)/sig.
		vSum = np.reshape(np.sum(np.power(V,2), axis=0),(-1,1))
		ep = 1 + (c - vSum.T)/sig
		epSqrt = np.reshape(np.sqrt(ep), (-1, 1))
		K = K / np.matlib.repmat(epSqrt.T,M,1)
		V = V / np.matlib.repmat(epSqrt.T,M,1)
		y = y / epSqrt
		Lm = cholesky(sig*np.identity(M) + np.dot(V,V.T), lower=False).T
		invLmV = np.linalg.solve(Lm,V)
		bet = np.dot(invLmV, y)
		# Likelihood
		fw = np.sum(np.log(np.diag(Lm))) + (N-M)/2*np.log(sig) + (np.dot(y.T,y) - np.dot(bet.T,bet))/2/sig + np.sum(np.log(ep))/2 + 0.5*N*np.log(2*np.pi)
		# Derivatives
		Lt = np.dot(L,Lm)
		B1 = np.linalg.solve(Lt.T, invLmV)
		b1 = np.linalg.solve(Lt.T, bet)
		invLV = np.linalg.solve(L.T, V)
		invL = np.linalg.inv(L)
		invQ = np.dot(invL.T, invL)
		invLt = np.linalg.inv(Lt)
		invA = np.dot(invLt.T,invLt)
		mu = np.dot(np.linalg.solve(Lm.T,bet).T, V).T
		sumV = np.reshape(np.sum(np.power(V, 2),axis=0),(-1,1))
		sumVsq = sumV.T
		sumB = np.reshape(np.sum(invLmV*invLmV,axis=0), (-1,1))
		bigSum = y*np.dot(bet.T,invLmV).T/sig - sumB/2 - (np.power(y,2)+np.power(mu,2))/2/sig + 0.5
		TT = np.dot(invLV, (invLV.T*np.matlib.repmat(bigSum,1,M)))
		dfxb = np.zeros((M,dim))
		# NOTE(review): dfb is never assigned below — lengthscale grads stay 0.
		dfb = np.zeros(dim)
		# Per-dimension gradient of the inducing point locations.
		for i in xrange(dim):
			dnnQ = self.dist(xb[:,i],xb[:,i])*Q
			dNnK = self.dist(-xb[:,i],-x[:,i])*K
			epdot = -2/sig*dNnK*invLV
			epPmod = -1*np.reshape(np.sum(epdot,axis=0),(-1,1))
			sum1 = np.reshape(np.sum((invQ - invA*sig)*dnnQ,axis=1), (-1,1))
			sum2 = np.reshape(np.sum(dnnQ*TT,axis=1), (-1,1))
			dfxb[:,i] = (-b1*(np.dot(dNnK, (y-mu))/sig + np.dot(dnnQ,b1)) + sum1 + np.dot(epdot,bigSum) - 2/sig*sum2).flatten()
			dNnK = dNnK*B1
			dfxb[:,i] = dfxb[:,i] + np.sum(dNnK,axis=1)
			dfxb[:,i] = dfxb[:,i] * np.sqrt(b[i])
		# NOTE(review): `epc` below is undefined (NameError) and `sum2` is the
		# stale value from the last loop dimension.
		dfc = (M + jitter*np.trace(invQ-sig*invA) - sig*sum2)/2 - np.dot(mu.T, (y-mu))/sig + np.dot(b1.T, np.dot((Q - np.dot(jitter, np.identity(M))), b1))/2 + np.dot(epc,bigSum)
		#noise
		dfsig = np.sum(bigSum / ep.T)
		derivs = np.vstack((np.reshape(dfxb,(M*dim,1),order='F'),np.reshape(dfb[0].flatten(),(-1,1)),np.reshape(dfb[1].flatten(),(-1,1)),np.reshape(dfc,(-1,1)),np.reshape(dfsig,(-1,1)))).flatten()
return fw, derivs | 5,072 | 30.70625 | 190 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/util/kronHelper.py | #This implementation is based on the article:
#
# @article{gilboa2015scaling,
# title={Scaling multidimensional inference for structured Gaussian processes},
# author={Gilboa, Elad and Saat{\c{c}}i, Yunus and Cunningham, John P},
# journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
# volume={37},
# number={2},
# pages={424--436},
# year={2015},
# publisher={IEEE}
# }
import numpy as np
"""
Helper class for methods relying on Kronecker inference.
"""
class KronHelper(object):
	"""Helper for fast inference with Kronecker-structured covariance matrices
	(per Gilboa, Saatci & Cunningham 2015, cited above)."""

	def __init__(self):
		self.name = "KronHelper"

	"""
	Compute array of covariance matrices per grid dimension
	dimVector - Vector of inducing points per dimension
	D - Number of dimensions
	kern - Class of kernel function
	"""
	def kron_inference(self, dimVector, D, kern):
		Kds = np.zeros(D, dtype=object) #vector for holding covariance per dimension
		K_kron = 1 # kronecker product of eigenvalues
		# retrieve the one-dimensional variation of the designated kernel
		# (range, not xrange, so this also runs on Python 3)
		for d in range(D):
			xg = dimVector[d]
			xg = np.reshape(xg, (len(xg), 1))
			Kds[d] = kern.K_scalar(xg, xg, D)
			#K_kron = np.kron(K_kron, Kds[d])
		return [K_kron, Kds]

	"""
	Fast matrix-vector multiplication for Kronecker matrices
	A - Array of dimension-specific kernels
	b - Vector being multiplied

	Computes (A[0] kron A[1] kron ... kron A[D-1]) b without ever forming
	the Kronecker product; returns the result as an (N, 1) column vector.
	"""
	def kron_mvprod(self, A, b):
		x = b
		D = len(A)
		# Dimension sizes as plain ints. The original kept them in a float
		# ndarray and called round() on array slices, which np.reshape rejects
		# under Python 3 / modern NumPy; integer shapes are behaviour-identical.
		dims = [len(A[d]) for d in range(D)]
		N = int(np.prod(dims))
		for d in range(D - 1, -1, -1):
			X = np.reshape(x, (dims[d], N // dims[d]), order='F')
			Z = np.dot(A[d], X)
			Z = Z.T
			x = np.reshape(Z, (-1, 1), order='F')
		return x
| 1,634 | 24.546875 | 81 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/util/__init__.py | from kronHelper import KronHelper
from inducingPointsHelper import InducingPointsHelper
from ssgp import SsgpHelper | 115 | 37.666667 | 53 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/util/ssgp.py | #This implementation of spectral GP approximation is based on the article:
#
# @article{lazaro2010sparse,
# title={Sparse spectrum Gaussian process regression},
# author={L{\'a}zaro-Gredilla, Miguel and Qui{\~n}onero-Candela, Joaquin and Rasmussen, Carl Edward and Figueiras-Vidal, An{\'\i}bal R},
# journal={The Journal of Machine Learning Research},
# volume={11},
# pages={1865--1881},
# year={2010},
# publisher={JMLR. org}
# }
from __future__ import division
import numpy as np
import random as ran
from scipy.spatial.distance import cdist, squareform, pdist
from scipy.optimize import fmin_l_bfgs_b
from scipy.linalg import cholesky
import numpy.matlib,numpy.linalg
import math
"""
Helper class for methods based on Fourier features.
"""
class SsgpHelper(object):
	"""Sparse-spectrum GP helper (Lazaro-Gredilla et al. 2010 port): evaluates
	and optimises random Fourier frequency points for an RBF kernel."""

	def __init__(self):
		self.name = "SsgpHelper"

	"""
	Evaluate the negative log marginal likelihood of the sparse-spectrum
	approximation using random Fourier features.
	X - training inputs, kern - kernel (noise/lengthscale used),
	S - flattened (m*D,) frequency vector, Y - targets.

	NOTE(review): m = len(S)/D relies on Python 2 integer division; under
	Python 3 this yields a float and np.reshape would fail.
	"""
	def ssgpr(self, X, kern, S, Y):
		[N, D] = X.shape
		m = len(S)/D
		W = np.reshape(S, (m, D), order='F')
		# Feature map: [cos(XW^T), sin(XW^T)], 2m basis functions.
		phi = np.dot(X, W.T)
		phi = np.hstack((np.cos(phi), np.sin(phi)))
		A = np.dot(phi.T, phi) + kern.noise*np.identity(2*m)
		R = cholesky(A, lower=False)
		PhiRi = np.linalg.lstsq(R.T, phi.T)[0] # PhiRi = phi/R
		Rtphity = np.dot(PhiRi, Y.flatten())
		# Negative log marginal likelihood of the trigonometric basis model.
		return 0.5/kern.noise*(np.sum(np.power(Y,2))-kern.noise/m*np.sum(np.power(Rtphity,2))) + np.sum(np.log(np.diag(R))) + (N/2 - m)*np.log(kern.noise)+N/2*np.log(2*np.pi)

	"""
	Optimize random selection of frequency points by taking the set which
	maximises the likelihood over a series of iterations (5 random restarts,
	keeping the draw with the lowest negative log marginal likelihood).
	"""
	def optimize_frequency_points(self, X, kern, Y, M, D):
		nlml = np.inf
		for k in xrange(5):
			#S = np.random.randn(M*D)
			# Spectral density of the RBF kernel: N(0, 1/(4 pi^2 l^2) I).
			S = np.random.multivariate_normal(np.zeros(D), (1/(4*np.pi**2)*(1/kern.lengthscale**2)*np.identity(D)), M).flatten()
			#S = np.random.normal(0, 1/(4*np.pi**2*kern.lengthscale**2), M*D)
			nlmlc = self.ssgpr(X, kern, S, Y)
			if nlmlc<nlml:
				S_save = S
				nlml = nlmlc
		return S_save
| 1,999 | 30.25 | 168 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/svd.py | import numpy as np
import time
from sklearn.utils.extmath import randomized_svd
from preconditioner import Preconditioner
"""
Randomized Singular Value Decomposition (SVD) Preconditioner
"""
class SVD(Preconditioner):
	"""Randomized SVD preconditioner: a rank-M approximation of K plus the
	likelihood noise on the diagonal."""

	"""
	Construct preconditioning matrix
	X - Training data
	kern - Class of kernel function
	M - Rank of the decomposition
	"""
	def __init__(self, X, kern, M):
		super(SVD, self).__init__("SVD")
		start = time.time()
		self.X = X
		self.kern = kern
		K = kern.K(X, X)
		N = np.shape(X)[0]
		#(self.U, self.Sigma, self.VT) = fb.pca(K, M)#, n_iter=1, l=M)
		self.U, self.Sigma, self.VT = randomized_svd(K, M)
		# Preconditioner: U diag(Sigma) VT + noise*I  (low-rank + diagonal).
		self.precon = np.dot(self.U, np.dot(np.diag(self.Sigma), self.VT)) + self.kern.noise*np.identity(N)
		self.duration = time.time() - start

	"""
	Compute inversion of the preconditioner.
	Splits diag(Sigma) symmetrically into sqrt factors so the Woodbury
	identity applies with A = noise*I.
	"""
	def get_inversion(self):
		N = np.shape(self.X)[0]
		M = np.shape(self.Sigma)[0]
		noise = self.kern.noise
		inv_noise = float(1) / noise
		noise_matrix = noise*np.identity(M)
		# eigs, eigv = np.linalg.eig(np.diag(self.Sigma))
		# for i in xrange(len(eigv)):
		# 	if (eigs[i] < self.kern.jitter):
		# 		eigs[i] = self.kern.jitter
		# 	eigs[i] = np.sqrt(eigs[i])
		eigs = np.sqrt(self.Sigma)
		eigsD = np.diag(eigs)
		left = np.dot(self.U, eigsD)
		right = np.dot(eigsD, self.VT)
		# Note: passes identity for Ainv and pulls 1/noise out front.
		return inv_noise*self.woodbury_inversion(np.identity(N), left, noise_matrix, right)

	"""
	Implementation of Woodbury's matrix inversion lemma:
	(A + U C V)^-1 = Ainv - Ainv U (Cinv + V Ainv U)^-1 V Ainv
	(arguments are Ainv and Cinv, i.e. already-inverted A and C).
	"""
	def woodbury_inversion(self, Ainv, U, Cinv, V):
		left_outer = np.dot(Ainv, U)
		right_outer = np.dot(V, Ainv)
		inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
		return Ainv - np.dot(left_outer, np.dot(inner, right_outer))

	"""
	Direct computation of (K^-1)b exploiting the matrix inversion lemma,
	without ever materialising the full N x N inverse.
	"""
	def inv_vec_prod(self, b):
		noise = self.kern.noise
		inv_noise = float(1) / noise
		inv_noise_matrix = inv_noise*np.identity(np.shape(self.X)[0])
		inv_sigma = np.diag(1 / self.Sigma)
		Ainv = inv_noise_matrix
		U = self.U
		Cinv = inv_sigma
		V = self.VT
		right_outer = np.dot(V, np.dot(Ainv, b))
		inner = np.linalg.inv(Cinv + np.dot(V, np.dot(Ainv, U)))
		left_outer = np.dot(Ainv, np.dot(U, np.dot(inner, right_outer)))
		return np.dot(Ainv, b) - left_outer

	"""
	Inversion of preconditioner for Laplace Approximation.
	NOTE(review): stores N and M on self as a side effect so that
	laplace_woodbury_inversion can read them.
	"""
	def get_laplace_inversion(self, W , Wsqrt):
		self.N = np.shape(self.X)[0]
		self.M = np.shape(self.Sigma)[0]
		eigs = np.sqrt(self.Sigma)
		eigsD = np.diag(eigs)
		left = np.dot(self.U, eigsD)
		right = np.dot(eigsD, self.VT)
		return self.laplace_woodbury_inversion(left, right, W.flatten(), Wsqrt.flatten())

	def laplace_woodbury_inversion(self, U, V, W, Wsqrt):
		# Woodbury form of (I + Wsqrt K Wsqrt)^-1 with K ~ U V low-rank.
		left_outer = np.dot(np.diag(Wsqrt), U)
		right_outer = np.dot(V, np.diag(Wsqrt))
		inner = np.linalg.inv(np.identity(self.M) + np.dot(V, np.dot(np.diag(W), U)))
		return np.identity(self.N) - np.dot(left_outer, np.dot(inner, right_outer))
| 2,924 | 27.676471 | 101 | py |
preconditioned_GPs | preconditioned_GPs-master/code/pcgComparison/PcgComp/PcgComp/preconditioners/kiss.py | #This implementation of Structured Kernel Interpolation is based on the article:
#
# @inproceedings{DBLP:conf/icml/WilsonN15,
# author = {Andrew Gordon Wilson and
# Hannes Nickisch},
# title = {Kernel Interpolation for Scalable Structured Gaussian Processes {(KISS-GP)}},
# booktitle = {Proceedings of the 32nd International Conference on Machine Learning,
# {ICML} 2015, Lille, France, 6-11 July 2015},
# pages = {1775--1784},
# year = {2015},
# crossref = {DBLP:conf/icml/2015},
# url = {http://jmlr.org/proceedings/papers/v37/wilson15.html},
# timestamp = {Sun, 05 Jul 2015 19:10:23 +0200},
# biburl = {http://dblp.uni-trier.de/rec/bib/conf/icml/WilsonN15},
# bibsource = {dblp computer science bibliography, http://dblp.org}
# }
import numpy as np
import time
from preconditioner import Preconditioner
from ..util.kronHelper import KronHelper
import math
"""
SKI Preconditioner
"""
class Kiss(Preconditioner):
	"""SKI / KISS-GP preconditioner (Wilson & Nickisch 2015, cited above):
	snaps the (normalised) training inputs onto a regular grid and builds the
	interpolation weight matrix W plus the per-dimension grid kernels Ku.

	Exposes: W (N x prod(grid sizes) binary weights), Ku (per-dimension
	covariances), precon (None — the Kronecker PCG solver works from W/Ku),
	duration (construction time in seconds).
	"""

	"""
	Construct preconditioning matrix
	X - Training data
	kern - Class of kernel function
	"""
	def __init__(self, X, kern):
		super(Kiss, self).__init__("Kiss")
		self.X = X
		self.kern = kern
		start = time.time()
		Xnew = self.normalize_columns(X)
		N = Xnew.shape[0]
		D = Xnew.shape[1]
		num_grid_interval = np.zeros((D))
		maximum = np.zeros(D)
		minimum = np.zeros(D)
		# Per-dimension data range and grid size heuristic N^(3/(2D)),
		# with a floor of 2 grid points per dimension.
		for i in xrange(D):
			maximum[i] = max(X[:,i])
			minimum[i] = min(X[:,i])
			num_grid_interval[i] = round(N**(float(3)/float(2*D)))#round((N**2)**(float(1)/D))
			if (num_grid_interval[i] == 1):
				num_grid_interval[i] = 2
		# construct grid vectors and intervals
		interval = np.zeros(D)
		vector = np.zeros(D, dtype=object)
		for i in xrange(D):
			[vector[i],interval[i]] = np.linspace(0, 1, num=num_grid_interval[i], retstep=True)
		for i in xrange(D):
			num_grid_interval[i] = len(vector[i])
		# For every point, the index of its nearest grid interval per dimension.
		interval_matrix = np.zeros((N, D))
		assign = np.zeros(N)
		for i in xrange(D):
			for j in xrange(N):
				interval_matrix[j][i] = self.get_rounded_threshold(Xnew[j][i], interval[i], len(vector[i]), 0, 1)
		# construct weight matrix
		# Flatten the per-dimension indices into one row-major grid index.
		for j in xrange(N):
			val =0
			for t in xrange(D):
				val = val + interval_matrix[j][t]*np.prod(num_grid_interval[t+1:D])
			assign[j] = val
		# NOTE(review): `index` below is a float (assign dtype); float indexing
		# worked in older NumPy but raises IndexError on modern versions.
		W = np.zeros((N,np.prod(num_grid_interval)))
		for i in xrange(N):
			index = assign[i]
			W[i][index] = 1
		kron_helper = KronHelper()
		unnormalzed_vector = self.reverse_normalize(vector, minimum, maximum)
		[K, Kds] = kron_helper.kron_inference(unnormalzed_vector, D, kern)
		#Kski = np.dot(np.dot(W, K), W.T)
		self.W = W
		self.Ku = Kds
		self.precon = None
		self.duration = time.time() - start

	"""
	Normalize the given training data (min-max per column onto [0, 1]).
	"""
	def normalize_columns(self, array):
		arr = array.copy()
		rows, cols = arr.shape
		for col in xrange(cols):
			maxim = arr[:,col].max()
			minim = arr[:,col].min()
			arr[:,col] = (arr[:,col] - minim) / (maxim - minim)
		return arr

	"""
	Reverse the normalization carried out on the data (maps each grid
	vector back to the original per-dimension data range).
	"""
	def reverse_normalize(self, array, minimum, maximum):
		new_array = np.zeros(len(array), dtype=object)
		for i in xrange(len(array)):
			new_array[i] = array[i]*(maximum[i] - minimum[i]) + minimum[i]
		return new_array

	"""
	Assign points to designated nearest location in the grid: returns the
	interval index of a in a grid of spacing min_clip, clamped to
	[0, max_interval] within the [minim, maxim] data range.
	"""
	def get_rounded_threshold(self, a, min_clip, max_interval, minim, maxim):
		interval = round(float(a) / min_clip)
		rounded_val = interval * min_clip
		if (rounded_val > maxim):
			return max_interval
		if (rounded_val < minim):
			return 0
		return interval
| 4,131 | 30.784615 | 113 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.