Dataset columns:

    id                int32          0 – 252k
    repo              stringlengths  7 – 55
    path              stringlengths  4 – 127
    func_name         stringlengths  1 – 88
    original_string   stringlengths  75 – 19.8k
    language          stringclasses  1 value
    code              stringlengths  75 – 19.8k
    code_tokens       list
    docstring         stringlengths  3 – 17.3k
    docstring_tokens  list
    sha               stringlengths  40 – 40
    url               stringlengths  87 – 242
237,900
astropy/regions
ah_bootstrap.py
_Bootstrapper._check_submodule
def _check_submodule(self):
    """
    Check if the given path is a git submodule.

    See the docstrings for ``_check_submodule_using_git`` and
    ``_check_submodule_no_git`` for further details.
    """

    if (self.path is None or
            (os.path.exists(self.path) and not os.path.isdir(self.path))):
        return False

    if self.use_git:
        return self._check_submodule_using_git()
    else:
        return self._check_submodule_no_git()
python
def _check_submodule(self):
    """
    Check if the given path is a git submodule.

    See the docstrings for ``_check_submodule_using_git`` and
    ``_check_submodule_no_git`` for further details.
    """

    if (self.path is None or
            (os.path.exists(self.path) and not os.path.isdir(self.path))):
        return False

    if self.use_git:
        return self._check_submodule_using_git()
    else:
        return self._check_submodule_no_git()
[ "def", "_check_submodule", "(", "self", ")", ":", "if", "(", "self", ".", "path", "is", "None", "or", "(", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "path", ")", ")", ")", ":", "return", "False", "if", "self", ".", "use_git", ":", "return", "self", ".", "_check_submodule_using_git", "(", ")", "else", ":", "return", "self", ".", "_check_submodule_no_git", "(", ")" ]
Check if the given path is a git submodule. See the docstrings for ``_check_submodule_using_git`` and ``_check_submodule_no_git`` for further details.
[ "Check", "if", "the", "given", "path", "is", "a", "git", "submodule", "." ]
452d962c417e4ff20d1268f99535c6ff89c83437
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L607-L622
237,901
EconForge/dolo
dolo/numeric/tensor.py
sdot
def sdot(U, V):
    '''
    Computes the tensor product reducing the last dimension of U
    with the first dimension of V. For matrices, it is equal to the
    regular matrix product.
    '''
    nu = U.ndim
    # nv = V.ndim
    return np.tensordot(U, V, axes=(nu - 1, 0))
python
def sdot(U, V):
    '''
    Computes the tensor product reducing the last dimension of U
    with the first dimension of V. For matrices, it is equal to the
    regular matrix product.
    '''
    nu = U.ndim
    # nv = V.ndim
    return np.tensordot(U, V, axes=(nu - 1, 0))
[ "def", "sdot", "(", "U", ",", "V", ")", ":", "nu", "=", "U", ".", "ndim", "#nv = V.ndim", "return", "np", ".", "tensordot", "(", "U", ",", "V", ",", "axes", "=", "(", "nu", "-", "1", ",", "0", ")", ")" ]
Computes the tensor product reducing the last dimension of U with the first dimension of V. For matrices, it is equal to the regular matrix product.
[ "Computes", "the", "tensorproduct", "reducing", "last", "dimensoin", "of", "U", "with", "first", "dimension", "of", "V", ".", "For", "matrices", "it", "is", "equal", "to", "regular", "matrix", "product", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/tensor.py#L44-L51
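A minimal usage sketch for sdot above (not part of the original record): for 2-D arrays the contraction reduces to the ordinary matrix product. Assumes numpy is imported as np and sdot is in scope.

    import numpy as np

    U = np.arange(6.0).reshape(2, 3)
    V = np.arange(12.0).reshape(3, 4)
    # contracting the last axis of U with the first axis of V equals U @ V
    assert np.allclose(sdot(U, V), U @ V)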
237,902
EconForge/dolo
dolo/numeric/interpolation/smolyak.py
SmolyakBasic.set_values
def set_values(self, x):
    """Updates the ``self.theta`` parameter. Returns the new value."""

    x = numpy.atleast_2d(x)
    x = x.real  # discard any spurious imaginary part
    C_inv = self.__C_inv__
    theta = numpy.dot(x, C_inv)
    self.theta = theta
    return theta
python
def set_values(self, x):
    """Updates the ``self.theta`` parameter. Returns the new value."""

    x = numpy.atleast_2d(x)
    x = x.real  # discard any spurious imaginary part
    C_inv = self.__C_inv__
    theta = numpy.dot(x, C_inv)
    self.theta = theta
    return theta
[ "def", "set_values", "(", "self", ",", "x", ")", ":", "x", "=", "numpy", ".", "atleast_2d", "(", "x", ")", "x", "=", "x", ".", "real", "# ahem", "C_inv", "=", "self", ".", "__C_inv__", "theta", "=", "numpy", ".", "dot", "(", "x", ",", "C_inv", ")", "self", ".", "theta", "=", "theta", "return", "theta" ]
Updates the ``self.theta`` parameter. Returns the new value.
[ "Updates", "self", ".", "theta", "parameter", ".", "No", "returns", "values" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/interpolation/smolyak.py#L256-L267
237,903
EconForge/dolo
dolo/numeric/discretization/discretization.py
tauchen
def tauchen(N, mu, rho, sigma, m=2):
    """
    Approximate an AR(1) process by a finite Markov chain using Tauchen's method.

    :param N: scalar, number of nodes for Z
    :param mu: scalar, unconditional mean of process
    :param rho: scalar
    :param sigma: scalar, std. dev. of epsilons
    :param m: max +- std. devs.
    :returns: Z, N*1 vector, nodes for Z. Zprob, N*N matrix, transition probabilities

    SJB: This is a port of Martin Floden's 1996 Matlab code to implement
    Tauchen's 1986 Economics Letters method. The following comments are Floden's.

    Finds a Markov chain whose sample paths approximate those of the AR(1) process
        z(t+1) = (1-rho)*mu + rho * z(t) + eps(t+1)
    where eps are normal with stddev sigma.
    """

    Z = np.zeros((N, 1))
    Zprob = np.zeros((N, N))
    a = (1 - rho) * mu

    Z[-1] = m * math.sqrt(sigma**2 / (1 - rho**2))
    Z[0] = -1 * Z[-1]
    zstep = (Z[-1] - Z[0]) / (N - 1)

    for i in range(1, N):
        Z[i] = Z[0] + zstep * i

    Z = Z + a / (1 - rho)

    for j in range(N):
        for k in range(N):
            if k == 0:
                Zprob[j, k] = sp.stats.norm.cdf((Z[0] - a - rho * Z[j] + zstep / 2) / sigma)
            elif k == (N - 1):
                Zprob[j, k] = 1 - sp.stats.norm.cdf((Z[-1] - a - rho * Z[j] - zstep / 2) / sigma)
            else:
                up = sp.stats.norm.cdf((Z[k] - a - rho * Z[j] + zstep / 2) / sigma)
                down = sp.stats.norm.cdf((Z[k] - a - rho * Z[j] - zstep / 2) / sigma)
                Zprob[j, k] = up - down

    return (Z, Zprob)
python
def tauchen(N, mu, rho, sigma, m=2):
    """
    Approximate an AR(1) process by a finite Markov chain using Tauchen's method.

    :param N: scalar, number of nodes for Z
    :param mu: scalar, unconditional mean of process
    :param rho: scalar
    :param sigma: scalar, std. dev. of epsilons
    :param m: max +- std. devs.
    :returns: Z, N*1 vector, nodes for Z. Zprob, N*N matrix, transition probabilities

    SJB: This is a port of Martin Floden's 1996 Matlab code to implement
    Tauchen's 1986 Economics Letters method. The following comments are Floden's.

    Finds a Markov chain whose sample paths approximate those of the AR(1) process
        z(t+1) = (1-rho)*mu + rho * z(t) + eps(t+1)
    where eps are normal with stddev sigma.
    """

    Z = np.zeros((N, 1))
    Zprob = np.zeros((N, N))
    a = (1 - rho) * mu

    Z[-1] = m * math.sqrt(sigma**2 / (1 - rho**2))
    Z[0] = -1 * Z[-1]
    zstep = (Z[-1] - Z[0]) / (N - 1)

    for i in range(1, N):
        Z[i] = Z[0] + zstep * i

    Z = Z + a / (1 - rho)

    for j in range(N):
        for k in range(N):
            if k == 0:
                Zprob[j, k] = sp.stats.norm.cdf((Z[0] - a - rho * Z[j] + zstep / 2) / sigma)
            elif k == (N - 1):
                Zprob[j, k] = 1 - sp.stats.norm.cdf((Z[-1] - a - rho * Z[j] - zstep / 2) / sigma)
            else:
                up = sp.stats.norm.cdf((Z[k] - a - rho * Z[j] + zstep / 2) / sigma)
                down = sp.stats.norm.cdf((Z[k] - a - rho * Z[j] - zstep / 2) / sigma)
                Zprob[j, k] = up - down

    return (Z, Zprob)
[ "def", "tauchen", "(", "N", ",", "mu", ",", "rho", ",", "sigma", ",", "m", "=", "2", ")", ":", "Z", "=", "np", ".", "zeros", "(", "(", "N", ",", "1", ")", ")", "Zprob", "=", "np", ".", "zeros", "(", "(", "N", ",", "N", ")", ")", "a", "=", "(", "1", "-", "rho", ")", "*", "mu", "Z", "[", "-", "1", "]", "=", "m", "*", "math", ".", "sqrt", "(", "sigma", "**", "2", "/", "(", "1", "-", "(", "rho", "**", "2", ")", ")", ")", "Z", "[", "0", "]", "=", "-", "1", "*", "Z", "[", "-", "1", "]", "zstep", "=", "(", "Z", "[", "-", "1", "]", "-", "Z", "[", "0", "]", ")", "/", "(", "N", "-", "1", ")", "for", "i", "in", "range", "(", "1", ",", "N", ")", ":", "Z", "[", "i", "]", "=", "Z", "[", "0", "]", "+", "zstep", "*", "(", "i", ")", "Z", "=", "Z", "+", "a", "/", "(", "1", "-", "rho", ")", "for", "j", "in", "range", "(", "0", ",", "N", ")", ":", "for", "k", "in", "range", "(", "0", ",", "N", ")", ":", "if", "k", "==", "0", ":", "Zprob", "[", "j", ",", "k", "]", "=", "sp", ".", "stats", ".", "norm", ".", "cdf", "(", "(", "Z", "[", "0", "]", "-", "a", "-", "rho", "*", "Z", "[", "j", "]", "+", "zstep", "/", "2", ")", "/", "sigma", ")", "elif", "k", "==", "(", "N", "-", "1", ")", ":", "Zprob", "[", "j", ",", "k", "]", "=", "1", "-", "sp", ".", "stats", ".", "norm", ".", "cdf", "(", "(", "Z", "[", "-", "1", "]", "-", "a", "-", "rho", "*", "Z", "[", "j", "]", "-", "zstep", "/", "2", ")", "/", "sigma", ")", "else", ":", "up", "=", "sp", ".", "stats", ".", "norm", ".", "cdf", "(", "(", "Z", "[", "k", "]", "-", "a", "-", "rho", "*", "Z", "[", "j", "]", "+", "zstep", "/", "2", ")", "/", "sigma", ")", "down", "=", "sp", ".", "stats", ".", "norm", ".", "cdf", "(", "(", "Z", "[", "k", "]", "-", "a", "-", "rho", "*", "Z", "[", "j", "]", "-", "zstep", "/", "2", ")", "/", "sigma", ")", "Zprob", "[", "j", ",", "k", "]", "=", "up", "-", "down", "return", "(", "(", "Z", ",", "Zprob", ")", ")" ]
Approximate an AR(1) process by a finite Markov chain using Tauchen's method.

:param N: scalar, number of nodes for Z
:param mu: scalar, unconditional mean of process
:param rho: scalar
:param sigma: scalar, std. dev. of epsilons
:param m: max +- std. devs.
:returns: Z, N*1 vector, nodes for Z. Zprob, N*N matrix, transition probabilities

SJB: This is a port of Martin Floden's 1996 Matlab code to implement Tauchen's
1986 Economics Letters method. The following comments are Floden's.

Finds a Markov chain whose sample paths approximate those of the AR(1) process
    z(t+1) = (1-rho)*mu + rho * z(t) + eps(t+1)
where eps are normal with stddev sigma.
[ "Approximate", "an", "AR1", "process", "by", "a", "finite", "markov", "chain", "using", "Tauchen", "s", "method", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/discretization.py#L13-L50
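An illustrative call for tauchen above (not part of the original record; parameter values are arbitrary). Rows of the returned transition matrix sum to one by construction.

    import numpy as np

    Z, Zprob = tauchen(N=5, mu=0.0, rho=0.9, sigma=0.01, m=2)
    # Z: (5, 1) array of equally spaced nodes; Zprob: (5, 5) transition matrix
    assert Z.shape == (5, 1) and Zprob.shape == (5, 5)
    assert np.allclose(Zprob.sum(axis=1), 1.0)  # each row is a probability distribution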
237,904
EconForge/dolo
dolo/numeric/discretization/discretization.py
rouwenhorst
def rouwenhorst(rho, sigma, N):
    """
    Approximate an AR(1) process by a finite Markov chain using Rouwenhorst's method.

    :param rho: autocorrelation of the AR(1) process
    :param sigma: conditional standard deviation of the AR(1) process
    :param N: number of states
    :return [nodes, P]: equally spaced nodes and transition matrix
    """

    from numpy import sqrt, linspace, array, zeros

    sigma = float(sigma)

    if N == 1:
        nodes = array([0.0])
        transitions = array([[1.0]])
        return [nodes, transitions]

    p = (rho + 1) / 2
    q = p
    nu = sqrt((N - 1) / (1 - rho**2)) * sigma

    nodes = linspace(-nu, nu, N)
    sig_a = sigma
    n = 1
    # mat0 = array( [[1]] )
    mat0 = array([[p, 1 - p], [1 - q, q]])
    if N == 2:
        return [nodes, mat0]
    for n in range(3, N + 1):
        mat = zeros((n, n))
        mat_A = mat.copy()
        mat_B = mat.copy()
        mat_C = mat.copy()
        mat_D = mat.copy()
        mat_A[:-1, :-1] = mat0
        mat_B[:-1, 1:] = mat0
        mat_C[1:, :-1] = mat0
        mat_D[1:, 1:] = mat0
        # grow the chain by one state, then renormalize the interior rows
        mat0 = p * mat_A + (1 - p) * mat_B + (1 - q) * mat_C + q * mat_D
        mat0[1:-1, :] = mat0[1:-1, :] / 2
    P = mat0
    return [nodes, P]
python
def rouwenhorst(rho, sigma, N):
    """
    Approximate an AR(1) process by a finite Markov chain using Rouwenhorst's method.

    :param rho: autocorrelation of the AR(1) process
    :param sigma: conditional standard deviation of the AR(1) process
    :param N: number of states
    :return [nodes, P]: equally spaced nodes and transition matrix
    """

    from numpy import sqrt, linspace, array, zeros

    sigma = float(sigma)

    if N == 1:
        nodes = array([0.0])
        transitions = array([[1.0]])
        return [nodes, transitions]

    p = (rho + 1) / 2
    q = p
    nu = sqrt((N - 1) / (1 - rho**2)) * sigma

    nodes = linspace(-nu, nu, N)
    sig_a = sigma
    n = 1
    # mat0 = array( [[1]] )
    mat0 = array([[p, 1 - p], [1 - q, q]])
    if N == 2:
        return [nodes, mat0]
    for n in range(3, N + 1):
        mat = zeros((n, n))
        mat_A = mat.copy()
        mat_B = mat.copy()
        mat_C = mat.copy()
        mat_D = mat.copy()
        mat_A[:-1, :-1] = mat0
        mat_B[:-1, 1:] = mat0
        mat_C[1:, :-1] = mat0
        mat_D[1:, 1:] = mat0
        # grow the chain by one state, then renormalize the interior rows
        mat0 = p * mat_A + (1 - p) * mat_B + (1 - q) * mat_C + q * mat_D
        mat0[1:-1, :] = mat0[1:-1, :] / 2
    P = mat0
    return [nodes, P]
[ "def", "rouwenhorst", "(", "rho", ",", "sigma", ",", "N", ")", ":", "from", "numpy", "import", "sqrt", ",", "linspace", ",", "array", ",", "zeros", "sigma", "=", "float", "(", "sigma", ")", "if", "N", "==", "1", ":", "nodes", "=", "array", "(", "[", "0.0", "]", ")", "transitions", "=", "array", "(", "[", "[", "1.0", "]", "]", ")", "return", "[", "nodes", ",", "transitions", "]", "p", "=", "(", "rho", "+", "1", ")", "/", "2", "q", "=", "p", "nu", "=", "sqrt", "(", "(", "N", "-", "1", ")", "/", "(", "1", "-", "rho", "**", "2", ")", ")", "*", "sigma", "nodes", "=", "linspace", "(", "-", "nu", ",", "nu", ",", "N", ")", "sig_a", "=", "sigma", "n", "=", "1", "# mat0 = array( [[1]] )", "mat0", "=", "array", "(", "[", "[", "p", ",", "1", "-", "p", "]", ",", "[", "1", "-", "q", ",", "q", "]", "]", ")", "if", "N", "==", "2", ":", "return", "[", "nodes", ",", "mat0", "]", "for", "n", "in", "range", "(", "3", ",", "N", "+", "1", ")", ":", "mat", "=", "zeros", "(", "(", "n", ",", "n", ")", ")", "mat_A", "=", "mat", ".", "copy", "(", ")", "mat_B", "=", "mat", ".", "copy", "(", ")", "mat_C", "=", "mat", ".", "copy", "(", ")", "mat_D", "=", "mat", ".", "copy", "(", ")", "mat_A", "[", ":", "-", "1", ",", ":", "-", "1", "]", "=", "mat0", "mat_B", "[", ":", "-", "1", ",", "1", ":", "]", "=", "mat0", "mat_C", "[", "1", ":", ",", ":", "-", "1", "]", "=", "mat0", "mat_D", "[", "1", ":", ",", "1", ":", "]", "=", "mat0", "mat0", "=", "p", "*", "mat_A", "+", "(", "1", "-", "p", ")", "*", "mat_B", "+", "(", "1", "-", "q", ")", "*", "mat_C", "+", "q", "*", "mat_D", "mat0", "[", "1", ":", "-", "1", ",", ":", "]", "=", "mat0", "[", "1", ":", "-", "1", ",", ":", "]", "/", "2", "P", "=", "mat0", "return", "[", "nodes", ",", "P", "]" ]
Approximate an AR(1) process by a finite Markov chain using Rouwenhorst's method.

:param rho: autocorrelation of the AR(1) process
:param sigma: conditional standard deviation of the AR(1) process
:param N: number of states
:return [nodes, P]: equally spaced nodes and transition matrix
[ "Approximate", "an", "AR1", "process", "by", "a", "finite", "markov", "chain", "using", "Rouwenhorst", "s", "method", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/discretization.py#L53-L97
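An illustrative call for rouwenhorst above (not part of the original record; parameter values are arbitrary):

    import numpy as np

    nodes, P = rouwenhorst(rho=0.9, sigma=0.01, N=3)
    # nodes: 3 equally spaced points on [-nu, nu]; P: (3, 3) transition matrix
    assert np.allclose(P.sum(axis=1), 1.0)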
237,905
EconForge/dolo
dolo/numeric/discretization/discretization.py
tensor_markov
def tensor_markov(*args):
    """Computes the product of two independent Markov chains.

    :param m1: a tuple containing the nodes and the transition matrix of the first chain
    :param m2: a tuple containing the nodes and the transition matrix of the second chain
    :return: a tuple containing the nodes and the transition matrix of the product chain
    """

    if len(args) > 2:
        m1 = args[0]
        m2 = args[1]
        tail = args[2:]
        prod = tensor_markov(m1, m2)
        # recurse on the product chain and the remaining chains
        return tensor_markov(prod, *tail)

    elif len(args) == 2:
        m1, m2 = args
        n1, t1 = m1
        n2, t2 = m2
        n1 = np.array(n1, dtype=float)
        n2 = np.array(n2, dtype=float)
        t1 = np.array(t1, dtype=float)
        t2 = np.array(t2, dtype=float)

        assert n1.shape[0] == t1.shape[0] == t1.shape[1]
        assert n2.shape[0] == t2.shape[0] == t2.shape[1]

        t = np.kron(t1, t2)
        p = t1.shape[0]
        q = t2.shape[0]

        # n = np.row_stack([
        #     np.repeat(n1, q, axis=1),
        #     np.tile( n2, (1,p))
        # ])
        n = np.column_stack([
            np.repeat(n1, q, axis=0),
            np.tile(n2, (p, 1))
        ])

        return [n, t]

    else:
        raise Exception("Incorrect number of arguments. Expected at least 2. Found {}.".format(len(args)))
python
def tensor_markov(*args):
    """Computes the product of two independent Markov chains.

    :param m1: a tuple containing the nodes and the transition matrix of the first chain
    :param m2: a tuple containing the nodes and the transition matrix of the second chain
    :return: a tuple containing the nodes and the transition matrix of the product chain
    """

    if len(args) > 2:
        m1 = args[0]
        m2 = args[1]
        tail = args[2:]
        prod = tensor_markov(m1, m2)
        # recurse on the product chain and the remaining chains
        return tensor_markov(prod, *tail)

    elif len(args) == 2:
        m1, m2 = args
        n1, t1 = m1
        n2, t2 = m2
        n1 = np.array(n1, dtype=float)
        n2 = np.array(n2, dtype=float)
        t1 = np.array(t1, dtype=float)
        t2 = np.array(t2, dtype=float)

        assert n1.shape[0] == t1.shape[0] == t1.shape[1]
        assert n2.shape[0] == t2.shape[0] == t2.shape[1]

        t = np.kron(t1, t2)
        p = t1.shape[0]
        q = t2.shape[0]

        # n = np.row_stack([
        #     np.repeat(n1, q, axis=1),
        #     np.tile( n2, (1,p))
        # ])
        n = np.column_stack([
            np.repeat(n1, q, axis=0),
            np.tile(n2, (p, 1))
        ])

        return [n, t]

    else:
        raise Exception("Incorrect number of arguments. Expected at least 2. Found {}.".format(len(args)))
[ "def", "tensor_markov", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", ">", "2", ":", "m1", "=", "args", "[", "0", "]", "m2", "=", "args", "[", "1", "]", "tail", "=", "args", "[", "2", ":", "]", "prod", "=", "tensor_markov", "(", "m1", ",", "m2", ")", "return", "tensor_markov", "(", "prod", ",", "tail", ")", "elif", "len", "(", "args", ")", "==", "2", ":", "m1", ",", "m2", "=", "args", "n1", ",", "t1", "=", "m1", "n2", ",", "t2", "=", "m2", "n1", "=", "np", ".", "array", "(", "n1", ",", "dtype", "=", "float", ")", "n2", "=", "np", ".", "array", "(", "n2", ",", "dtype", "=", "float", ")", "t1", "=", "np", ".", "array", "(", "t1", ",", "dtype", "=", "float", ")", "t2", "=", "np", ".", "array", "(", "t2", ",", "dtype", "=", "float", ")", "assert", "(", "n1", ".", "shape", "[", "0", "]", "==", "t1", ".", "shape", "[", "0", "]", "==", "t1", ".", "shape", "[", "1", "]", ")", "assert", "(", "n2", ".", "shape", "[", "0", "]", "==", "t2", ".", "shape", "[", "0", "]", "==", "t2", ".", "shape", "[", "1", "]", ")", "t", "=", "np", ".", "kron", "(", "t1", ",", "t2", ")", "p", "=", "t1", ".", "shape", "[", "0", "]", "q", "=", "t2", ".", "shape", "[", "0", "]", "np", ".", "tile", "(", "n2", ",", "(", "1", ",", "p", ")", ")", "# n = np.row_stack([", "# np.repeat(n1, q, axis=1),", "# np.tile( n2, (1,p))", "# ])", "n", "=", "np", ".", "column_stack", "(", "[", "np", ".", "repeat", "(", "n1", ",", "q", ",", "axis", "=", "0", ")", ",", "np", ".", "tile", "(", "n2", ",", "(", "p", ",", "1", ")", ")", "]", ")", "return", "[", "n", ",", "t", "]", "else", ":", "raise", "Exception", "(", "\"Incorrect number of arguments. Expected at least 2. Found {}.\"", ".", "format", "(", "len", "(", "args", ")", ")", ")" ]
Computes the product of two independent Markov chains.

:param m1: a tuple containing the nodes and the transition matrix of the first chain
:param m2: a tuple containing the nodes and the transition matrix of the second chain
:return: a tuple containing the nodes and the transition matrix of the product chain
[ "Computes", "the", "product", "of", "two", "independent", "markov", "chains", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/discretization.py#L155-L201
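An illustrative call for tensor_markov above (not part of the original record): two independent 2-state chains combine into one 4-state chain whose transition matrix is the Kronecker product.

    import numpy as np

    m1 = (np.array([[0.0], [1.0]]), np.array([[0.9, 0.1], [0.1, 0.9]]))
    m2 = (np.array([[-1.0], [1.0]]), np.array([[0.8, 0.2], [0.2, 0.8]]))
    n, t = tensor_markov(m1, m2)
    # n: (4, 2) array of all node combinations; t: (4, 4) transition matrix
    assert n.shape == (4, 2) and t.shape == (4, 4)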
237,906
EconForge/dolo
trash/dolo/misc/modfile.py
dynare_import
def dynare_import(filename, full_output=False, debug=False):
    '''Imports model defined in specified file'''

    import os
    basename = os.path.basename(filename)
    # keep everything before the extension as the model name
    fname = re.compile(r'(.*)\.(.*)').match(basename).group(1)
    with open(filename) as f:
        txt = f.read()
    model = parse_dynare_text(txt, full_output=full_output, debug=debug)
    model.name = fname
    return model
python
def dynare_import(filename, full_output=False, debug=False):
    '''Imports model defined in specified file'''

    import os
    basename = os.path.basename(filename)
    # keep everything before the extension as the model name
    fname = re.compile(r'(.*)\.(.*)').match(basename).group(1)
    with open(filename) as f:
        txt = f.read()
    model = parse_dynare_text(txt, full_output=full_output, debug=debug)
    model.name = fname
    return model
[ "def", "dynare_import", "(", "filename", ",", "full_output", "=", "False", ",", "debug", "=", "False", ")", ":", "import", "os", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "fname", "=", "re", ".", "compile", "(", "'(.*)\\.(.*)'", ")", ".", "match", "(", "basename", ")", ".", "group", "(", "1", ")", "f", "=", "open", "(", "filename", ")", "txt", "=", "f", ".", "read", "(", ")", "model", "=", "parse_dynare_text", "(", "txt", ",", "full_output", "=", "full_output", ",", "debug", "=", "debug", ")", "model", ".", "name", "=", "fname", "return", "model" ]
Imports model defined in specified file
[ "Imports", "model", "defined", "in", "specified", "file" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/misc/modfile.py#L311-L320
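The filename-stem extraction used above can be checked in isolation (illustrative only; the path is hypothetical). os.path.splitext is the idiomatic equivalent.

    import os, re

    basename = os.path.basename('/some/dir/example.mod')   # hypothetical path
    assert re.compile(r'(.*)\.(.*)').match(basename).group(1) == 'example'
    assert os.path.splitext(basename)[0] == 'example'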
237,907
EconForge/dolo
dolo/algos/perfect_foresight.py
_shocks_to_epsilons
def _shocks_to_epsilons(model, shocks, T):
    """
    Helper function to support input argument `shocks` being one of many
    different data types. Will always return a `T, n_e` matrix.
    """
    n_e = len(model.calibration['exogenous'])

    # if we have a DataFrame, convert it to a dict and rely on the method below
    if isinstance(shocks, pd.DataFrame):
        shocks = {k: shocks[k].tolist() for k in shocks.columns}

    # handle case where shocks might be a dict. Be careful to handle case where
    # value arrays are not the same length
    if isinstance(shocks, dict):
        epsilons = np.zeros((T + 1, n_e))
        for (i, k) in enumerate(model.symbols["exogenous"]):
            if k in shocks:
                this_shock = shocks[k]
                epsilons[:len(this_shock), i] = this_shock
                epsilons[len(this_shock):, i] = this_shock[-1]
            else:
                # otherwise set to value in calibration
                epsilons[:, i] = model.calibration["exogenous"][i]
        return epsilons

    # read from calibration if not given
    if shocks is None:
        shocks = model.calibration["exogenous"]

    # now we just assume that shocks is array-like and try using the output of
    # np.asarray(shocks)
    shocks = np.asarray(shocks)
    shocks = shocks.reshape((-1, n_e))

    # until last period, exogenous shock takes its last value
    epsilons = np.zeros((T + 1, n_e))
    epsilons[:(shocks.shape[0] - 1), :] = shocks[1:, :]
    epsilons[(shocks.shape[0] - 1):, :] = shocks[-1:, :]

    return epsilons
python
def _shocks_to_epsilons(model, shocks, T):
    """
    Helper function to support input argument `shocks` being one of many
    different data types. Will always return a `T, n_e` matrix.
    """
    n_e = len(model.calibration['exogenous'])

    # if we have a DataFrame, convert it to a dict and rely on the method below
    if isinstance(shocks, pd.DataFrame):
        shocks = {k: shocks[k].tolist() for k in shocks.columns}

    # handle case where shocks might be a dict. Be careful to handle case where
    # value arrays are not the same length
    if isinstance(shocks, dict):
        epsilons = np.zeros((T + 1, n_e))
        for (i, k) in enumerate(model.symbols["exogenous"]):
            if k in shocks:
                this_shock = shocks[k]
                epsilons[:len(this_shock), i] = this_shock
                epsilons[len(this_shock):, i] = this_shock[-1]
            else:
                # otherwise set to value in calibration
                epsilons[:, i] = model.calibration["exogenous"][i]
        return epsilons

    # read from calibration if not given
    if shocks is None:
        shocks = model.calibration["exogenous"]

    # now we just assume that shocks is array-like and try using the output of
    # np.asarray(shocks)
    shocks = np.asarray(shocks)
    shocks = shocks.reshape((-1, n_e))

    # until last period, exogenous shock takes its last value
    epsilons = np.zeros((T + 1, n_e))
    epsilons[:(shocks.shape[0] - 1), :] = shocks[1:, :]
    epsilons[(shocks.shape[0] - 1):, :] = shocks[-1:, :]

    return epsilons
[ "def", "_shocks_to_epsilons", "(", "model", ",", "shocks", ",", "T", ")", ":", "n_e", "=", "len", "(", "model", ".", "calibration", "[", "'exogenous'", "]", ")", "# if we have a DataFrame, convert it to a dict and rely on the method below", "if", "isinstance", "(", "shocks", ",", "pd", ".", "DataFrame", ")", ":", "shocks", "=", "{", "k", ":", "shocks", "[", "k", "]", ".", "tolist", "(", ")", "for", "k", "in", "shocks", ".", "columns", "}", "# handle case where shocks might be a dict. Be careful to handle case where", "# value arrays are not the same length", "if", "isinstance", "(", "shocks", ",", "dict", ")", ":", "epsilons", "=", "np", ".", "zeros", "(", "(", "T", "+", "1", ",", "n_e", ")", ")", "for", "(", "i", ",", "k", ")", "in", "enumerate", "(", "model", ".", "symbols", "[", "\"exogenous\"", "]", ")", ":", "if", "k", "in", "shocks", ":", "this_shock", "=", "shocks", "[", "k", "]", "epsilons", "[", ":", "len", "(", "this_shock", ")", ",", "i", "]", "=", "this_shock", "epsilons", "[", "len", "(", "this_shock", ")", ":", ",", "i", "]", "=", "this_shock", "[", "-", "1", "]", "else", ":", "# otherwise set to value in calibration", "epsilons", "[", ":", ",", "i", "]", "=", "model", ".", "calibration", "[", "\"exogenous\"", "]", "[", "i", "]", "return", "epsilons", "# read from calibration if not given", "if", "shocks", "is", "None", ":", "shocks", "=", "model", ".", "calibration", "[", "\"exogenous\"", "]", "# now we just assume that shocks is array-like and try using the output of", "# np.asarray(shocks)", "shocks", "=", "np", ".", "asarray", "(", "shocks", ")", "shocks", "=", "shocks", ".", "reshape", "(", "(", "-", "1", ",", "n_e", ")", ")", "# until last period, exogenous shock takes its last value", "epsilons", "=", "np", ".", "zeros", "(", "(", "T", "+", "1", ",", "n_e", ")", ")", "epsilons", "[", ":", "(", "shocks", ".", "shape", "[", "0", "]", "-", "1", ")", ",", ":", "]", "=", "shocks", "[", "1", ":", ",", ":", "]", "epsilons", "[", "(", "shocks", ".", "shape", "[", "0", "]", "-", "1", ")", ":", ",", ":", "]", "=", "shocks", "[", "-", "1", ":", ",", ":", "]", "return", "epsilons" ]
Helper function to support input argument `shocks` being one of many different data types. Will always return a `T, n_e` matrix.
[ "Helper", "function", "to", "support", "input", "argument", "shocks", "being", "one", "of", "many", "different", "data", "types", ".", "Will", "always", "return", "a", "T", "n_e", "matrix", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/algos/perfect_foresight.py#L9-L49
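The padding rule of the dict branch above can be illustrated standalone (shock names and values are hypothetical): a path shorter than T+1 is held at its last value, and unspecified shocks take their calibrated value.

    import numpy as np

    T, names = 5, ["e_z", "e_r"]       # hypothetical exogenous shock names
    shocks = {"e_z": [0.1, 0.05]}      # only e_z is specified
    calib = [0.0, 0.02]                # hypothetical calibrated defaults

    epsilons = np.zeros((T + 1, len(names)))
    for i, k in enumerate(names):
        if k in shocks:
            path = shocks[k]
            epsilons[:len(path), i] = path
            epsilons[len(path):, i] = path[-1]   # hold the last value
        else:
            epsilons[:, i] = calib[i]            # calibrated value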
237,908
EconForge/dolo
trash/dolo/misc/symbolic_interactive.py
clear_all
def clear_all():
    """
    Clears all parameters, variables, and shocks defined previously
    """

    frame = inspect.currentframe().f_back
    try:
        if frame.f_globals.get('variables_order'):
            # we should avoid declaring symbols twice!
            del frame.f_globals['variables_order']
        if frame.f_globals.get('parameters_order'):
            # we should avoid declaring symbols twice!
            del frame.f_globals['parameters_order']
    finally:
        del frame
python
def clear_all():
    """
    Clears all parameters, variables, and shocks defined previously
    """

    frame = inspect.currentframe().f_back
    try:
        if frame.f_globals.get('variables_order'):
            # we should avoid declaring symbols twice!
            del frame.f_globals['variables_order']
        if frame.f_globals.get('parameters_order'):
            # we should avoid declaring symbols twice!
            del frame.f_globals['parameters_order']
    finally:
        del frame
[ "def", "clear_all", "(", ")", ":", "frame", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_back", "try", ":", "if", "frame", ".", "f_globals", ".", "get", "(", "'variables_order'", ")", ":", "# we should avoid to declare symbols twice !", "del", "frame", ".", "f_globals", "[", "'variables_order'", "]", "if", "frame", ".", "f_globals", ".", "get", "(", "'parameters_order'", ")", ":", "# we should avoid to declare symbols twice !", "del", "frame", ".", "f_globals", "[", "'parameters_order'", "]", "finally", ":", "del", "frame" ]
Clears all parameters, variables, and shocks defined previously
[ "Clears", "all", "parameters", "variables", "and", "shocks", "defined", "previously" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/misc/symbolic_interactive.py#L319-L333
237,909
EconForge/dolo
trash/dolo/algos/dtcscc/nonlinearsystem.py
nonlinear_system
def nonlinear_system(model, initial_dr=None, maxit=10, tol=1e-8, grid={},
                     distribution={}, verbose=True):
    '''
    Finds a global solution for ``model`` by solving one large system of
    equations using a simple Newton algorithm.

    Parameters
    ----------
    model: NumericModel
        "dtcscc" model to be solved
    verbose: boolean
        if True, display iterations
    initial_dr: decision rule
        initial guess for the decision rule
    maxit: int
        maximum number of iterations
    tol: float
        tolerance criterion for successive approximations
    grid: grid options
    distribution: distribution options

    Returns
    -------
    decision rule :
        approximated solution
    '''

    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} |'
        headline = headline.format('N', ' Error', 'Time')
        stars = '-' * len(headline)
        print(stars)
        print(headline)
        print(stars)
        # format string for within loop
        fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} |'

    f = model.functions['arbitrage']
    g = model.functions['transition']
    p = model.calibration['parameters']

    distrib = model.get_distribution(**distribution)
    nodes, weights = distrib.discretize()

    approx = model.get_grid(**grid)
    ms = create_interpolator(approx, approx.interpolation)

    grid = ms.grid

    if initial_dr is None:
        dr = approximate_controls(model)
    else:
        dr = initial_dr

    ms.set_values(dr(grid))

    x = dr(grid)
    x0 = x.copy()

    it = 0
    err = 10

    a0 = x0.copy().reshape((x0.shape[0] * x0.shape[1],))
    a = a0.copy()

    while err > tol and it < maxit:
        it += 1
        t1 = time.time()

        r, da = residuals(f, g, grid, a.reshape(x0.shape), ms, nodes,
                          weights, p, diff=True)[:2]
        r = r.flatten()
        err = abs(r).max()

        t2 = time.time()
        if verbose:
            print(fmt_str.format(it, err, t2 - t1))

        if err > tol:
            a -= scipy.sparse.linalg.spsolve(da, r)

    if verbose:
        print(stars)

    return ms
python
def nonlinear_system(model, initial_dr=None, maxit=10, tol=1e-8, grid={},
                     distribution={}, verbose=True):
    '''
    Finds a global solution for ``model`` by solving one large system of
    equations using a simple Newton algorithm.

    Parameters
    ----------
    model: NumericModel
        "dtcscc" model to be solved
    verbose: boolean
        if True, display iterations
    initial_dr: decision rule
        initial guess for the decision rule
    maxit: int
        maximum number of iterations
    tol: float
        tolerance criterion for successive approximations
    grid: grid options
    distribution: distribution options

    Returns
    -------
    decision rule :
        approximated solution
    '''

    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} |'
        headline = headline.format('N', ' Error', 'Time')
        stars = '-' * len(headline)
        print(stars)
        print(headline)
        print(stars)
        # format string for within loop
        fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} |'

    f = model.functions['arbitrage']
    g = model.functions['transition']
    p = model.calibration['parameters']

    distrib = model.get_distribution(**distribution)
    nodes, weights = distrib.discretize()

    approx = model.get_grid(**grid)
    ms = create_interpolator(approx, approx.interpolation)

    grid = ms.grid

    if initial_dr is None:
        dr = approximate_controls(model)
    else:
        dr = initial_dr

    ms.set_values(dr(grid))

    x = dr(grid)
    x0 = x.copy()

    it = 0
    err = 10

    a0 = x0.copy().reshape((x0.shape[0] * x0.shape[1],))
    a = a0.copy()

    while err > tol and it < maxit:
        it += 1
        t1 = time.time()

        r, da = residuals(f, g, grid, a.reshape(x0.shape), ms, nodes,
                          weights, p, diff=True)[:2]
        r = r.flatten()
        err = abs(r).max()

        t2 = time.time()
        if verbose:
            print(fmt_str.format(it, err, t2 - t1))

        if err > tol:
            a -= scipy.sparse.linalg.spsolve(da, r)

    if verbose:
        print(stars)

    return ms
[ "def", "nonlinear_system", "(", "model", ",", "initial_dr", "=", "None", ",", "maxit", "=", "10", ",", "tol", "=", "1e-8", ",", "grid", "=", "{", "}", ",", "distribution", "=", "{", "}", ",", "verbose", "=", "True", ")", ":", "if", "verbose", ":", "headline", "=", "'|{0:^4} | {1:10} | {2:8} |'", "headline", "=", "headline", ".", "format", "(", "'N'", ",", "' Error'", ",", "'Time'", ")", "stars", "=", "'-'", "*", "len", "(", "headline", ")", "print", "(", "stars", ")", "print", "(", "headline", ")", "print", "(", "stars", ")", "# format string for within loop", "fmt_str", "=", "'|{0:4} | {1:10.3e} | {2:8.3f} |'", "f", "=", "model", ".", "functions", "[", "'arbitrage'", "]", "g", "=", "model", ".", "functions", "[", "'transition'", "]", "p", "=", "model", ".", "calibration", "[", "'parameters'", "]", "distrib", "=", "model", ".", "get_distribution", "(", "*", "*", "distribution", ")", "nodes", ",", "weights", "=", "distrib", ".", "discretize", "(", ")", "approx", "=", "model", ".", "get_grid", "(", "*", "*", "grid", ")", "ms", "=", "create_interpolator", "(", "approx", ",", "approx", ".", "interpolation", ")", "grid", "=", "ms", ".", "grid", "if", "initial_dr", "is", "None", ":", "dr", "=", "approximate_controls", "(", "model", ")", "else", ":", "dr", "=", "initial_dr", "ms", ".", "set_values", "(", "dr", "(", "grid", ")", ")", "x", "=", "dr", "(", "grid", ")", "x0", "=", "x", ".", "copy", "(", ")", "it", "=", "0", "err", "=", "10", "a0", "=", "x0", ".", "copy", "(", ")", ".", "reshape", "(", "(", "x0", ".", "shape", "[", "0", "]", "*", "x0", ".", "shape", "[", "1", "]", ",", ")", ")", "a", "=", "a0", ".", "copy", "(", ")", "while", "err", ">", "tol", "and", "it", "<", "maxit", ":", "it", "+=", "1", "t1", "=", "time", ".", "time", "(", ")", "r", ",", "da", "=", "residuals", "(", "f", ",", "g", ",", "grid", ",", "a", ".", "reshape", "(", "x0", ".", "shape", ")", ",", "ms", ",", "nodes", ",", "weights", ",", "p", ",", "diff", "=", "True", ")", "[", ":", "2", "]", "r", "=", "r", ".", "flatten", "(", ")", "err", "=", "abs", "(", "r", ")", ".", "max", "(", ")", "t2", "=", "time", ".", "time", "(", ")", "if", "verbose", ":", "print", "(", "fmt_str", ".", "format", "(", "it", ",", "err", ",", "t2", "-", "t1", ")", ")", "if", "err", ">", "tol", ":", "a", "-=", "scipy", ".", "sparse", ".", "linalg", ".", "spsolve", "(", "da", ",", "r", ")", "if", "verbose", ":", "print", "(", "stars", ")", "return", "ms" ]
Finds a global solution for ``model`` by solving one large system of equations
using a simple Newton algorithm.

Parameters
----------
model: NumericModel
    "dtcscc" model to be solved
verbose: boolean
    if True, display iterations
initial_dr: decision rule
    initial guess for the decision rule
maxit: int
    maximum number of iterations
tol: float
    tolerance criterion for successive approximations
grid: grid options
distribution: distribution options

Returns
-------
decision rule :
    approximated solution
[ "Finds", "a", "global", "solution", "for", "model", "by", "solving", "one", "large", "system", "of", "equations", "using", "a", "simple", "newton", "algorithm", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/algos/dtcscc/nonlinearsystem.py#L10-L97
237,910
EconForge/dolo
dolo/numeric/discretization/quadrature.py
gauss_hermite_nodes
def gauss_hermite_nodes(orders, sigma, mu=None):
    '''
    Computes the weights and nodes for Gauss-Hermite quadrature.

    Parameters
    ----------
    orders : int, list, array
        The order of integration used in the quadrature routine
    sigma : array-like
        If one dimensional, the variance of the normal distribution being
        approximated. If multidimensional, the variance-covariance matrix of
        the multivariate normal process being approximated.
    mu : array, optional
        Mean of the normal distribution (defaults to zero).

    Returns
    -------
    x : array
        Quadrature nodes
    w : array
        Quadrature weights
    '''
    if isinstance(orders, int):
        orders = [orders]

    import numpy

    if mu is None:
        mu = numpy.array([0] * sigma.shape[0])

    herms = [hermgauss(i) for i in orders]

    points = [h[0] * numpy.sqrt(2) for h in herms]
    weights = [h[1] / numpy.sqrt(numpy.pi) for h in herms]

    if len(orders) == 1:
        # Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
        # print(points.shape)
        x = numpy.array(points[0]) * numpy.sqrt(float(sigma))
        if sigma.ndim == 2:
            x = x[:, None]
        w = weights[0]
        return [x, w]
    else:
        x = cartesian(points).T

        from functools import reduce
        w = reduce(numpy.kron, weights)

        zero_columns = numpy.where(sigma.sum(axis=0) == 0)[0]
        for i in zero_columns:
            sigma[i, i] = 1.0

        C = numpy.linalg.cholesky(sigma)

        x = numpy.dot(C, x) + mu[:, numpy.newaxis]

        x = numpy.ascontiguousarray(x.T)

        for i in zero_columns:
            x[:, i] = 0

        return [x, w]
python
def gauss_hermite_nodes(orders, sigma, mu=None):
    '''
    Computes the weights and nodes for Gauss-Hermite quadrature.

    Parameters
    ----------
    orders : int, list, array
        The order of integration used in the quadrature routine
    sigma : array-like
        If one dimensional, the variance of the normal distribution being
        approximated. If multidimensional, the variance-covariance matrix of
        the multivariate normal process being approximated.
    mu : array, optional
        Mean of the normal distribution (defaults to zero).

    Returns
    -------
    x : array
        Quadrature nodes
    w : array
        Quadrature weights
    '''
    if isinstance(orders, int):
        orders = [orders]

    import numpy

    if mu is None:
        mu = numpy.array([0] * sigma.shape[0])

    herms = [hermgauss(i) for i in orders]

    points = [h[0] * numpy.sqrt(2) for h in herms]
    weights = [h[1] / numpy.sqrt(numpy.pi) for h in herms]

    if len(orders) == 1:
        # Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
        # print(points.shape)
        x = numpy.array(points[0]) * numpy.sqrt(float(sigma))
        if sigma.ndim == 2:
            x = x[:, None]
        w = weights[0]
        return [x, w]
    else:
        x = cartesian(points).T

        from functools import reduce
        w = reduce(numpy.kron, weights)

        zero_columns = numpy.where(sigma.sum(axis=0) == 0)[0]
        for i in zero_columns:
            sigma[i, i] = 1.0

        C = numpy.linalg.cholesky(sigma)

        x = numpy.dot(C, x) + mu[:, numpy.newaxis]

        x = numpy.ascontiguousarray(x.T)

        for i in zero_columns:
            x[:, i] = 0

        return [x, w]
[ "def", "gauss_hermite_nodes", "(", "orders", ",", "sigma", ",", "mu", "=", "None", ")", ":", "if", "isinstance", "(", "orders", ",", "int", ")", ":", "orders", "=", "[", "orders", "]", "import", "numpy", "if", "mu", "is", "None", ":", "mu", "=", "numpy", ".", "array", "(", "[", "0", "]", "*", "sigma", ".", "shape", "[", "0", "]", ")", "herms", "=", "[", "hermgauss", "(", "i", ")", "for", "i", "in", "orders", "]", "points", "=", "[", "h", "[", "0", "]", "*", "numpy", ".", "sqrt", "(", "2", ")", "for", "h", "in", "herms", "]", "weights", "=", "[", "h", "[", "1", "]", "/", "numpy", ".", "sqrt", "(", "numpy", ".", "pi", ")", "for", "h", "in", "herms", "]", "if", "len", "(", "orders", ")", "==", "1", ":", "# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.", "# print(points.shape)", "x", "=", "numpy", ".", "array", "(", "points", "[", "0", "]", ")", "*", "numpy", ".", "sqrt", "(", "float", "(", "sigma", ")", ")", "if", "sigma", ".", "ndim", "==", "2", ":", "x", "=", "x", "[", ":", ",", "None", "]", "w", "=", "weights", "[", "0", "]", "return", "[", "x", ",", "w", "]", "else", ":", "x", "=", "cartesian", "(", "points", ")", ".", "T", "from", "functools", "import", "reduce", "w", "=", "reduce", "(", "numpy", ".", "kron", ",", "weights", ")", "zero_columns", "=", "numpy", ".", "where", "(", "sigma", ".", "sum", "(", "axis", "=", "0", ")", "==", "0", ")", "[", "0", "]", "for", "i", "in", "zero_columns", ":", "sigma", "[", "i", ",", "i", "]", "=", "1.0", "C", "=", "numpy", ".", "linalg", ".", "cholesky", "(", "sigma", ")", "x", "=", "numpy", ".", "dot", "(", "C", ",", "x", ")", "+", "mu", "[", ":", ",", "numpy", ".", "newaxis", "]", "x", "=", "numpy", ".", "ascontiguousarray", "(", "x", ".", "T", ")", "for", "i", "in", "zero_columns", ":", "x", "[", ":", ",", "i", "]", "=", "0", "return", "[", "x", ",", "w", "]" ]
Computes the weights and nodes for Gauss-Hermite quadrature.

Parameters
----------
orders : int, list, array
    The order of integration used in the quadrature routine
sigma : array-like
    If one dimensional, the variance of the normal distribution being
    approximated. If multidimensional, the variance-covariance matrix of
    the multivariate normal process being approximated.
mu : array, optional
    Mean of the normal distribution (defaults to zero).

Returns
-------
x : array
    Quadrature nodes
w : array
    Quadrature weights
[ "Computes", "the", "weights", "and", "nodes", "for", "Gauss", "Hermite", "quadrature", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/quadrature.py#L59-L122
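An illustrative call for gauss_hermite_nodes above (not part of the original record; assumes hermgauss is importable as in the module). Five nodes for a scalar normal shock with variance 0.01: the weights integrate the density to one and reproduce the variance.

    import numpy as np

    x, w = gauss_hermite_nodes(5, np.array([[0.01]]))
    assert abs(w.sum() - 1.0) < 1e-12          # weights sum to one
    assert abs(w @ x[:, 0]**2 - 0.01) < 1e-8   # quadrature recovers the variance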
237,911
EconForge/dolo
dolo/numeric/optimize/newton.py
newton
def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype='serial'):
    """Solve nonlinear system using safeguarded Newton iterations

    Parameters
    ----------
    f : callable
        function returning a couple (residuals, jacobian) at point x
    x : array
        initial guess
    verbose : boolean
        if True, display iterations
    tol : float
        tolerance on the maximum absolute residual
    maxit : int
        maximum number of iterations
    jactype : {'serial', 'sparse', 'full'}
        type of jacobian returned by ``f``; selects the linear solver

    Returns
    -------
    [x, it] :
        solution and number of iterations
    """

    if verbose:
        print = lambda txt: old_print(txt)
    else:
        print = lambda txt: None

    it = 0
    error = 10
    converged = False
    maxbacksteps = 30

    x0 = x

    if jactype == 'sparse':
        from scipy.sparse.linalg import spsolve as solve
    elif jactype == 'full':
        from numpy.linalg import solve
    else:
        solve = serial_solve

    while it < maxit and not converged:
        [v, dv] = f(x)
        # TODO: rewrite starting here
        # print("Time to evaluate {}".format(ss-tt))
        error_0 = abs(v).max()
        if error_0 < tol:
            if verbose:
                print("> System was solved after iteration {}. Residual={}".format(it, error_0))
            converged = True
        else:
            it += 1
            dx = solve(dv, v)
            # norm_dx = abs(dx).max()
            # backtracking: halve the step until the residual improves
            for bck in range(maxbacksteps):
                xx = x - dx * (2**(-bck))
                vm = f(xx)[0]
                err = abs(vm).max()
                if err < error_0:
                    break
            x = xx

            if verbose:
                print("\t> {} | {} | {}".format(it, err, bck))

    if not converged:
        import warnings
        warnings.warn("Did not converge")

    return [x, it]
python
def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype='serial'):
    """Solve nonlinear system using safeguarded Newton iterations

    Parameters
    ----------
    f : callable
        function returning a couple (residuals, jacobian) at point x
    x : array
        initial guess
    verbose : boolean
        if True, display iterations
    tol : float
        tolerance on the maximum absolute residual
    maxit : int
        maximum number of iterations
    jactype : {'serial', 'sparse', 'full'}
        type of jacobian returned by ``f``; selects the linear solver

    Returns
    -------
    [x, it] :
        solution and number of iterations
    """

    if verbose:
        print = lambda txt: old_print(txt)
    else:
        print = lambda txt: None

    it = 0
    error = 10
    converged = False
    maxbacksteps = 30

    x0 = x

    if jactype == 'sparse':
        from scipy.sparse.linalg import spsolve as solve
    elif jactype == 'full':
        from numpy.linalg import solve
    else:
        solve = serial_solve

    while it < maxit and not converged:
        [v, dv] = f(x)
        # TODO: rewrite starting here
        # print("Time to evaluate {}".format(ss-tt))
        error_0 = abs(v).max()
        if error_0 < tol:
            if verbose:
                print("> System was solved after iteration {}. Residual={}".format(it, error_0))
            converged = True
        else:
            it += 1
            dx = solve(dv, v)
            # norm_dx = abs(dx).max()
            # backtracking: halve the step until the residual improves
            for bck in range(maxbacksteps):
                xx = x - dx * (2**(-bck))
                vm = f(xx)[0]
                err = abs(vm).max()
                if err < error_0:
                    break
            x = xx

            if verbose:
                print("\t> {} | {} | {}".format(it, err, bck))

    if not converged:
        import warnings
        warnings.warn("Did not converge")

    return [x, it]
[ "def", "newton", "(", "f", ",", "x", ",", "verbose", "=", "False", ",", "tol", "=", "1e-6", ",", "maxit", "=", "5", ",", "jactype", "=", "'serial'", ")", ":", "if", "verbose", ":", "print", "=", "lambda", "txt", ":", "old_print", "(", "txt", ")", "else", ":", "print", "=", "lambda", "txt", ":", "None", "it", "=", "0", "error", "=", "10", "converged", "=", "False", "maxbacksteps", "=", "30", "x0", "=", "x", "if", "jactype", "==", "'sparse'", ":", "from", "scipy", ".", "sparse", ".", "linalg", "import", "spsolve", "as", "solve", "elif", "jactype", "==", "'full'", ":", "from", "numpy", ".", "linalg", "import", "solve", "else", ":", "solve", "=", "serial_solve", "while", "it", "<", "maxit", "and", "not", "converged", ":", "[", "v", ",", "dv", "]", "=", "f", "(", "x", ")", "# TODO: rewrite starting here", "# print(\"Time to evaluate {}\".format(ss-tt)0)", "error_0", "=", "abs", "(", "v", ")", ".", "max", "(", ")", "if", "error_0", "<", "tol", ":", "if", "verbose", ":", "print", "(", "\"> System was solved after iteration {}. Residual={}\"", ".", "format", "(", "it", ",", "error_0", ")", ")", "converged", "=", "True", "else", ":", "it", "+=", "1", "dx", "=", "solve", "(", "dv", ",", "v", ")", "# norm_dx = abs(dx).max()", "for", "bck", "in", "range", "(", "maxbacksteps", ")", ":", "xx", "=", "x", "-", "dx", "*", "(", "2", "**", "(", "-", "bck", ")", ")", "vm", "=", "f", "(", "xx", ")", "[", "0", "]", "err", "=", "abs", "(", "vm", ")", ".", "max", "(", ")", "if", "err", "<", "error_0", ":", "break", "x", "=", "xx", "if", "verbose", ":", "print", "(", "\"\\t> {} | {} | {}\"", ".", "format", "(", "it", ",", "err", ",", "bck", ")", ")", "if", "not", "converged", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"Did not converge\"", ")", "return", "[", "x", ",", "it", "]" ]
Solve nonlinear system using safeguarded Newton iterations

Parameters
----------
f : callable
    function returning a couple (residuals, jacobian) at point x
x : array
    initial guess
verbose : boolean
    if True, display iterations
tol : float
    tolerance on the maximum absolute residual
maxit : int
    maximum number of iterations
jactype : {'serial', 'sparse', 'full'}
    type of jacobian returned by ``f``; selects the linear solver

Returns
-------
[x, it] :
    solution and number of iterations
[ "Solve", "nonlinear", "system", "using", "safeguarded", "Newton", "iterations" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/optimize/newton.py#L81-L151
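An illustrative call for newton above (not part of the original record): solve x**2 = 2 with a dense Jacobian; f must return the couple (residual, jacobian).

    import numpy as np

    def f(x):
        # residual and dense jacobian of x**2 - 2 = 0
        return np.array([x[0]**2 - 2.0]), np.array([[2.0 * x[0]]])

    x_sol, n_it = newton(f, np.array([1.0]), jactype='full', maxit=10)
    assert abs(x_sol[0] - np.sqrt(2.0)) < 1e-6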
237,912
EconForge/dolo
dolo/numeric/extern/qz.py
qzordered
def qzordered(A, B, crit=1.0):
    "Eigenvalues bigger than crit are sorted in the top-left."

    TOL = 1e-10

    def select(alpha, beta):
        return alpha**2 > crit * beta**2

    [S, T, alpha, beta, U, V] = ordqz(A, B, output='real', sort=select)

    eigval = abs(numpy.diag(S) / numpy.diag(T))

    return [S, T, U, V, eigval]
python
def qzordered(A, B, crit=1.0):
    "Eigenvalues bigger than crit are sorted in the top-left."

    TOL = 1e-10

    def select(alpha, beta):
        return alpha**2 > crit * beta**2

    [S, T, alpha, beta, U, V] = ordqz(A, B, output='real', sort=select)

    eigval = abs(numpy.diag(S) / numpy.diag(T))

    return [S, T, U, V, eigval]
[ "def", "qzordered", "(", "A", ",", "B", ",", "crit", "=", "1.0", ")", ":", "TOL", "=", "1e-10", "def", "select", "(", "alpha", ",", "beta", ")", ":", "return", "alpha", "**", "2", ">", "crit", "*", "beta", "**", "2", "[", "S", ",", "T", ",", "alpha", ",", "beta", ",", "U", ",", "V", "]", "=", "ordqz", "(", "A", ",", "B", ",", "output", "=", "'real'", ",", "sort", "=", "select", ")", "eigval", "=", "abs", "(", "numpy", ".", "diag", "(", "S", ")", "/", "numpy", ".", "diag", "(", "T", ")", ")", "return", "[", "S", ",", "T", ",", "U", ",", "V", ",", "eigval", "]" ]
Eigenvalues bigger than crit are sorted in the top-left.
[ "Eigenvalues", "bigger", "than", "crit", "are", "sorted", "in", "the", "top", "-", "left", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/extern/qz.py#L6-L18
237,913
EconForge/dolo
dolo/numeric/extern/qz.py
ordqz
def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
          overwrite_b=False, check_finite=True):
    """
    QZ decomposition for a pair of matrices with reordering.

    .. versionadded:: 0.17.0

    Parameters
    ----------
    A : (N, N) array_like
        2d array to decompose
    B : (N, N) array_like
        2d array to decompose
    sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given an eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue
        ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
        output='complex', the sort function takes two complex arguments
        (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,
        string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

    output : str {'real','complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    overwrite_a : bool, optional
        If True, the contents of A are overwritten.
    overwrite_b : bool, optional
        If True, the contents of B are overwritten.
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to underlying
        algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    alpha : (N,) ndarray
        alpha = alphar + alphai * 1j. See notes.
    beta : (N,) ndarray
        See notes.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    Notes
    -----
    On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
    generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
    ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
    that would result if the 2-by-2 diagonal blocks of the real generalized
    Schur form of (A,B) were further reduced to triangular form using complex
    unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue
    is real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are
    a complex conjugate pair, with ``ALPHAI(j+1)`` negative.

    See also
    --------
    qz
    """
    import warnings
    import numpy as np
    from numpy import asarray_chkfinite
    from scipy.linalg.misc import LinAlgError, _datacopied
    from scipy.linalg.lapack import get_lapack_funcs
    from scipy._lib.six import callable
    from scipy.linalg._decomp_qz import _qz, _select_function

    # NOTE: should users be able to set these?
    lwork = None
    result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
                      overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                      check_finite=check_finite)
    AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
    if typ not in 'cz':
        alpha, beta = result[3] + result[4] * 1.j, result[5]
    else:
        alpha, beta = result[3], result[4]

    sfunction = _select_function(sort)
    select = sfunction(alpha, beta)

    tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))

    if lwork is None or lwork == -1:
        result = tgsen(select, AA, BB, Q, Z, lwork=-1)
        lwork = result[-3][0].real.astype(np.int)
        # looks like wrong value passed to ZTGSYL if not
        lwork += 1

    liwork = None
    if liwork is None or liwork == -1:
        result = tgsen(select, AA, BB, Q, Z, liwork=-1)
        liwork = result[-2][0]

    result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)

    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of tgsen" % -info)
    elif info == 1:
        raise ValueError("Reordering of (A, B) failed because the transformed"
                         " matrix pair (A, B) would be too far from "
                         "generalized Schur form; the problem is very "
                         "ill-conditioned. (A, B) may have been partially "
                         "reorded. If requested, 0 is returned in DIF(*), "
                         "PL, and PR.")

    # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
    # work, iwork, info
    if typ in ['f', 'd']:
        alpha = result[2] + result[3] * 1.j
        return (result[0], result[1], alpha, result[4], result[5], result[6])
    # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
    # iwork, info
    else:
        return result[0], result[1], result[2], result[3], result[4], result[5]
python
def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
          overwrite_b=False, check_finite=True):
    """
    QZ decomposition for a pair of matrices with reordering.

    .. versionadded:: 0.17.0

    Parameters
    ----------
    A : (N, N) array_like
        2d array to decompose
    B : (N, N) array_like
        2d array to decompose
    sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given an eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue
        ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
        output='complex', the sort function takes two complex arguments
        (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,
        string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

    output : str {'real','complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    overwrite_a : bool, optional
        If True, the contents of A are overwritten.
    overwrite_b : bool, optional
        If True, the contents of B are overwritten.
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to underlying
        algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    alpha : (N,) ndarray
        alpha = alphar + alphai * 1j. See notes.
    beta : (N,) ndarray
        See notes.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    Notes
    -----
    On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
    generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
    ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
    that would result if the 2-by-2 diagonal blocks of the real generalized
    Schur form of (A,B) were further reduced to triangular form using complex
    unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue
    is real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are
    a complex conjugate pair, with ``ALPHAI(j+1)`` negative.

    See also
    --------
    qz
    """
    import warnings
    import numpy as np
    from numpy import asarray_chkfinite
    from scipy.linalg.misc import LinAlgError, _datacopied
    from scipy.linalg.lapack import get_lapack_funcs
    from scipy._lib.six import callable
    from scipy.linalg._decomp_qz import _qz, _select_function

    # NOTE: should users be able to set these?
    lwork = None
    result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
                      overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                      check_finite=check_finite)
    AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
    if typ not in 'cz':
        alpha, beta = result[3] + result[4] * 1.j, result[5]
    else:
        alpha, beta = result[3], result[4]

    sfunction = _select_function(sort)
    select = sfunction(alpha, beta)

    tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))

    if lwork is None or lwork == -1:
        result = tgsen(select, AA, BB, Q, Z, lwork=-1)
        lwork = result[-3][0].real.astype(np.int)
        # looks like wrong value passed to ZTGSYL if not
        lwork += 1

    liwork = None
    if liwork is None or liwork == -1:
        result = tgsen(select, AA, BB, Q, Z, liwork=-1)
        liwork = result[-2][0]

    result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)

    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of tgsen" % -info)
    elif info == 1:
        raise ValueError("Reordering of (A, B) failed because the transformed"
                         " matrix pair (A, B) would be too far from "
                         "generalized Schur form; the problem is very "
                         "ill-conditioned. (A, B) may have been partially "
                         "reorded. If requested, 0 is returned in DIF(*), "
                         "PL, and PR.")

    # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
    # work, iwork, info
    if typ in ['f', 'd']:
        alpha = result[2] + result[3] * 1.j
        return (result[0], result[1], alpha, result[4], result[5], result[6])
    # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
    # iwork, info
    else:
        return result[0], result[1], result[2], result[3], result[4], result[5]
[ "def", "ordqz", "(", "A", ",", "B", ",", "sort", "=", "'lhp'", ",", "output", "=", "'real'", ",", "overwrite_a", "=", "False", ",", "overwrite_b", "=", "False", ",", "check_finite", "=", "True", ")", ":", "import", "warnings", "import", "numpy", "as", "np", "from", "numpy", "import", "asarray_chkfinite", "from", "scipy", ".", "linalg", ".", "misc", "import", "LinAlgError", ",", "_datacopied", "from", "scipy", ".", "linalg", ".", "lapack", "import", "get_lapack_funcs", "from", "scipy", ".", "_lib", ".", "six", "import", "callable", "from", "scipy", ".", "linalg", ".", "_decomp_qz", "import", "_qz", ",", "_select_function", "#NOTE: should users be able to set these?", "lwork", "=", "None", "result", ",", "typ", "=", "_qz", "(", "A", ",", "B", ",", "output", "=", "output", ",", "lwork", "=", "lwork", ",", "sort", "=", "None", ",", "overwrite_a", "=", "overwrite_a", ",", "overwrite_b", "=", "overwrite_b", ",", "check_finite", "=", "check_finite", ")", "AA", ",", "BB", ",", "Q", ",", "Z", "=", "result", "[", "0", "]", ",", "result", "[", "1", "]", ",", "result", "[", "-", "4", "]", ",", "result", "[", "-", "3", "]", "if", "typ", "not", "in", "'cz'", ":", "alpha", ",", "beta", "=", "result", "[", "3", "]", "+", "result", "[", "4", "]", "*", "1.j", ",", "result", "[", "5", "]", "else", ":", "alpha", ",", "beta", "=", "result", "[", "3", "]", ",", "result", "[", "4", "]", "sfunction", "=", "_select_function", "(", "sort", ")", "select", "=", "sfunction", "(", "alpha", ",", "beta", ")", "tgsen", ",", "=", "get_lapack_funcs", "(", "(", "'tgsen'", ",", ")", ",", "(", "AA", ",", "BB", ")", ")", "if", "lwork", "is", "None", "or", "lwork", "==", "-", "1", ":", "result", "=", "tgsen", "(", "select", ",", "AA", ",", "BB", ",", "Q", ",", "Z", ",", "lwork", "=", "-", "1", ")", "lwork", "=", "result", "[", "-", "3", "]", "[", "0", "]", ".", "real", ".", "astype", "(", "np", ".", "int", ")", "# looks like wrong value passed to ZTGSYL if not", "lwork", "+=", "1", "liwork", "=", "None", "if", "liwork", "is", "None", "or", "liwork", "==", "-", "1", ":", "result", "=", "tgsen", "(", "select", ",", "AA", ",", "BB", ",", "Q", ",", "Z", ",", "liwork", "=", "-", "1", ")", "liwork", "=", "result", "[", "-", "2", "]", "[", "0", "]", "result", "=", "tgsen", "(", "select", ",", "AA", ",", "BB", ",", "Q", ",", "Z", ",", "lwork", "=", "lwork", ",", "liwork", "=", "liwork", ")", "info", "=", "result", "[", "-", "1", "]", "if", "info", "<", "0", ":", "raise", "ValueError", "(", "\"Illegal value in argument %d of tgsen\"", "%", "-", "info", ")", "elif", "info", "==", "1", ":", "raise", "ValueError", "(", "\"Reordering of (A, B) failed because the transformed\"", "\" matrix pair (A, B) would be too far from \"", "\"generalized Schur form; the problem is very \"", "\"ill-conditioned. (A, B) may have been partially \"", "\"reorded. 
If requested, 0 is returned in DIF(*), \"", "\"PL, and PR.\"", ")", "# for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,", "# work, iwork, info", "if", "typ", "in", "[", "'f'", ",", "'d'", "]", ":", "alpha", "=", "result", "[", "2", "]", "+", "result", "[", "3", "]", "*", "1.j", "return", "(", "result", "[", "0", "]", ",", "result", "[", "1", "]", ",", "alpha", ",", "result", "[", "4", "]", ",", "result", "[", "5", "]", ",", "result", "[", "6", "]", ")", "# for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,", "# iwork, info", "else", ":", "return", "result", "[", "0", "]", ",", "result", "[", "1", "]", ",", "result", "[", "2", "]", ",", "result", "[", "3", "]", ",", "result", "[", "4", "]", ",", "result", "[", "5", "]" ]
QZ decomposition for a pair of matrices with reordering. .. versionadded:: 0.17.0 Parameters ---------- A : (N, N) array_like 2d array to decompose B : (N, N) array_like 2d array to decompose sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional Specifies whether the upper eigenvalues should be sorted. A callable may be passed that, given an eigenvalue, returns a boolean denoting whether the eigenvalue should be sorted to the top-left (True). For real matrix pairs, the sort function takes three real arguments (alphar, alphai, beta). The eigenvalue ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or output='complex', the sort function takes two complex arguments (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively, string parameters may be used: - 'lhp' Left-hand plane (x.real < 0.0) - 'rhp' Right-hand plane (x.real > 0.0) - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0) - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) output : str {'real','complex'}, optional Construct the real or complex QZ decomposition for real matrices. Default is 'real'. overwrite_a : bool, optional If True, the contents of A are overwritten. overwrite_b : bool, optional If True, the contents of B are overwritten. check_finite : bool, optional If true, checks that the elements of `A` and `B` are finite numbers. If false, does no checking and passes the matrices through to the underlying algorithm. Returns ------- AA : (N, N) ndarray Generalized Schur form of A. BB : (N, N) ndarray Generalized Schur form of B. alpha : (N,) ndarray alpha = alphar + alphai * 1j. See notes. beta : (N,) ndarray See notes. Q : (N, N) ndarray The left Schur vectors. Z : (N, N) ndarray The right Schur vectors. Notes ----- On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T) that would result if the 2-by-2 diagonal blocks of the real generalized Schur form of (A,B) were further reduced to triangular form using complex unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue is real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are a complex conjugate pair, with ``ALPHAI(j+1)`` negative. See also -------- qz
[ "QZ", "decomposition", "for", "a", "pair", "of", "matrices", "with", "reordering", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/extern/qz.py#L21-L154
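The function above tracks SciPy's `ordqz`, which has shipped as `scipy.linalg.ordqz` since SciPy 0.17. A minimal usage sketch under that assumption; the 2x2 matrices are made up for illustration:

import numpy as np
from scipy.linalg import ordqz

A = np.array([[0.5, 1.0],
              [0.0, 2.0]])
B = np.eye(2)

# Sort the stable eigenvalues (inside the unit circle) into the top-left block.
AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='iuc', output='real')

print(alpha / beta)                           # generalized eigenvalues, 0.5 first
assert np.allclose(A, Q @ AA @ Z.conj().T)    # A = Q AA Z^H
assert np.allclose(B, Q @ BB @ Z.conj().T)    # B = Q BB Z^H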
237,914
EconForge/dolo
trash/dolo/algos/dtcscc/time_iteration_2.py
parameterized_expectations_direct
def parameterized_expectations_direct(model, verbose=False, initial_dr=None, pert_order=1, grid={}, distribution={}, maxit=100, tol=1e-8): ''' Finds a global solution for ``model`` using parameterized expectations function. Requires the model to be written with controls as a direct function of the model objects. The algorithm iterates on the expectations function in the arbitrage equation. It follows the discussion in section 9.9 of Miranda and Fackler (2002). Parameters ---------- model : NumericModel "dtcscc" model to be solved verbose : boolean if True, display iterations initial_dr : decision rule initial guess for the decision rule pert_order : {1} if no initial guess is supplied, the perturbation solution at order ``pert_order`` is used as initial guess grid: grid options distribution: distribution options maxit: maximum number of iterations tol: tolerance criterium for successive approximations Returns ------- decision rule : approximated solution ''' t1 = time.time() g = model.functions['transition'] d = model.functions['direct_response'] h = model.functions['expectation'] parms = model.calibration['parameters'] if initial_dr is None: if pert_order == 1: initial_dr = approximate_controls(model) if pert_order > 1: raise Exception("Perturbation order > 1 not supported (yet).") approx = model.get_grid(**grid) grid = approx.grid interp_type = approx.interpolation dr = create_interpolator(approx, interp_type) expect = create_interpolator(approx, interp_type) distrib = model.get_distribution(**distribution) nodes, weights = distrib.discretize() N = grid.shape[0] z = np.zeros((N, len(model.symbols['expectations']))) x_0 = initial_dr(grid) x_0 = x_0.real # just in case ... h_0 = h(grid, x_0, parms) it = 0 err = 10 err_0 = 10 if verbose: headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |' headline = headline.format('N', ' Error', 'Gain', 'Time') stars = '-'*len(headline) print(stars) print(headline) print(stars) # format string for within loop fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |' while err > tol and it <= maxit: it += 1 t_start = time.time() # dr.set_values(x_0) expect.set_values(h_0) z[...] = 0 for i in range(weights.shape[0]): e = nodes[i, :] S = g(grid, x_0, e, parms) # evaluate expectation over the future state z += weights[i]*expect(S) # TODO: check that control is admissible new_x = d(grid, z, parms) new_h = h(grid, new_x, parms) # update error err = (abs(new_h - h_0).max()) # Update guess for decision rule and expectations function x_0 = new_x h_0 = new_h # print error information if `verbose` err_SA = err/err_0 err_0 = err t_finish = time.time() elapsed = t_finish - t_start if verbose: print(fmt_str.format(it, err, err_SA, elapsed)) if it == maxit: import warnings warnings.warn(UserWarning("Maximum number of iterations reached")) # compute final fime and do final printout if `verbose` t2 = time.time() if verbose: print(stars) print('Elapsed: {} seconds.'.format(t2 - t1)) print(stars) # Interpolation for the decision rule dr.set_values(x_0) return dr
python
def parameterized_expectations_direct(model, verbose=False, initial_dr=None, pert_order=1, grid={}, distribution={}, maxit=100, tol=1e-8): ''' Finds a global solution for ``model`` using parameterized expectations function. Requires the model to be written with controls as a direct function of the model objects. The algorithm iterates on the expectations function in the arbitrage equation. It follows the discussion in section 9.9 of Miranda and Fackler (2002). Parameters ---------- model : NumericModel "dtcscc" model to be solved verbose : boolean if True, display iterations initial_dr : decision rule initial guess for the decision rule pert_order : {1} if no initial guess is supplied, the perturbation solution at order ``pert_order`` is used as initial guess grid: grid options distribution: distribution options maxit: maximum number of iterations tol: tolerance criterium for successive approximations Returns ------- decision rule : approximated solution ''' t1 = time.time() g = model.functions['transition'] d = model.functions['direct_response'] h = model.functions['expectation'] parms = model.calibration['parameters'] if initial_dr is None: if pert_order == 1: initial_dr = approximate_controls(model) if pert_order > 1: raise Exception("Perturbation order > 1 not supported (yet).") approx = model.get_grid(**grid) grid = approx.grid interp_type = approx.interpolation dr = create_interpolator(approx, interp_type) expect = create_interpolator(approx, interp_type) distrib = model.get_distribution(**distribution) nodes, weights = distrib.discretize() N = grid.shape[0] z = np.zeros((N, len(model.symbols['expectations']))) x_0 = initial_dr(grid) x_0 = x_0.real # just in case ... h_0 = h(grid, x_0, parms) it = 0 err = 10 err_0 = 10 if verbose: headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |' headline = headline.format('N', ' Error', 'Gain', 'Time') stars = '-'*len(headline) print(stars) print(headline) print(stars) # format string for within loop fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |' while err > tol and it <= maxit: it += 1 t_start = time.time() # dr.set_values(x_0) expect.set_values(h_0) z[...] = 0 for i in range(weights.shape[0]): e = nodes[i, :] S = g(grid, x_0, e, parms) # evaluate expectation over the future state z += weights[i]*expect(S) # TODO: check that control is admissible new_x = d(grid, z, parms) new_h = h(grid, new_x, parms) # update error err = (abs(new_h - h_0).max()) # Update guess for decision rule and expectations function x_0 = new_x h_0 = new_h # print error information if `verbose` err_SA = err/err_0 err_0 = err t_finish = time.time() elapsed = t_finish - t_start if verbose: print(fmt_str.format(it, err, err_SA, elapsed)) if it == maxit: import warnings warnings.warn(UserWarning("Maximum number of iterations reached")) # compute final fime and do final printout if `verbose` t2 = time.time() if verbose: print(stars) print('Elapsed: {} seconds.'.format(t2 - t1)) print(stars) # Interpolation for the decision rule dr.set_values(x_0) return dr
[ "def", "parameterized_expectations_direct", "(", "model", ",", "verbose", "=", "False", ",", "initial_dr", "=", "None", ",", "pert_order", "=", "1", ",", "grid", "=", "{", "}", ",", "distribution", "=", "{", "}", ",", "maxit", "=", "100", ",", "tol", "=", "1e-8", ")", ":", "t1", "=", "time", ".", "time", "(", ")", "g", "=", "model", ".", "functions", "[", "'transition'", "]", "d", "=", "model", ".", "functions", "[", "'direct_response'", "]", "h", "=", "model", ".", "functions", "[", "'expectation'", "]", "parms", "=", "model", ".", "calibration", "[", "'parameters'", "]", "if", "initial_dr", "is", "None", ":", "if", "pert_order", "==", "1", ":", "initial_dr", "=", "approximate_controls", "(", "model", ")", "if", "pert_order", ">", "1", ":", "raise", "Exception", "(", "\"Perturbation order > 1 not supported (yet).\"", ")", "approx", "=", "model", ".", "get_grid", "(", "*", "*", "grid", ")", "grid", "=", "approx", ".", "grid", "interp_type", "=", "approx", ".", "interpolation", "dr", "=", "create_interpolator", "(", "approx", ",", "interp_type", ")", "expect", "=", "create_interpolator", "(", "approx", ",", "interp_type", ")", "distrib", "=", "model", ".", "get_distribution", "(", "*", "*", "distribution", ")", "nodes", ",", "weights", "=", "distrib", ".", "discretize", "(", ")", "N", "=", "grid", ".", "shape", "[", "0", "]", "z", "=", "np", ".", "zeros", "(", "(", "N", ",", "len", "(", "model", ".", "symbols", "[", "'expectations'", "]", ")", ")", ")", "x_0", "=", "initial_dr", "(", "grid", ")", "x_0", "=", "x_0", ".", "real", "# just in case ...", "h_0", "=", "h", "(", "grid", ",", "x_0", ",", "parms", ")", "it", "=", "0", "err", "=", "10", "err_0", "=", "10", "if", "verbose", ":", "headline", "=", "'|{0:^4} | {1:10} | {2:8} | {3:8} |'", "headline", "=", "headline", ".", "format", "(", "'N'", ",", "' Error'", ",", "'Gain'", ",", "'Time'", ")", "stars", "=", "'-'", "*", "len", "(", "headline", ")", "print", "(", "stars", ")", "print", "(", "headline", ")", "print", "(", "stars", ")", "# format string for within loop", "fmt_str", "=", "'|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'", "while", "err", ">", "tol", "and", "it", "<=", "maxit", ":", "it", "+=", "1", "t_start", "=", "time", ".", "time", "(", ")", "# dr.set_values(x_0)", "expect", ".", "set_values", "(", "h_0", ")", "z", "[", "...", "]", "=", "0", "for", "i", "in", "range", "(", "weights", ".", "shape", "[", "0", "]", ")", ":", "e", "=", "nodes", "[", "i", ",", ":", "]", "S", "=", "g", "(", "grid", ",", "x_0", ",", "e", ",", "parms", ")", "# evaluate expectation over the future state", "z", "+=", "weights", "[", "i", "]", "*", "expect", "(", "S", ")", "# TODO: check that control is admissible", "new_x", "=", "d", "(", "grid", ",", "z", ",", "parms", ")", "new_h", "=", "h", "(", "grid", ",", "new_x", ",", "parms", ")", "# update error", "err", "=", "(", "abs", "(", "new_h", "-", "h_0", ")", ".", "max", "(", ")", ")", "# Update guess for decision rule and expectations function", "x_0", "=", "new_x", "h_0", "=", "new_h", "# print error information if `verbose`", "err_SA", "=", "err", "/", "err_0", "err_0", "=", "err", "t_finish", "=", "time", ".", "time", "(", ")", "elapsed", "=", "t_finish", "-", "t_start", "if", "verbose", ":", "print", "(", "fmt_str", ".", "format", "(", "it", ",", "err", ",", "err_SA", ",", "elapsed", ")", ")", "if", "it", "==", "maxit", ":", "import", "warnings", "warnings", ".", "warn", "(", "UserWarning", "(", "\"Maximum number of iterations reached\"", ")", ")", "# compute final fime and do final printout if `verbose`", "t2", "=", "time", ".", "time", "(", ")", "if", "verbose", ":", "print", "(", "stars", ")", "print", "(", "'Elapsed: {} seconds.'", ".", "format", "(", "t2", "-", "t1", ")", ")", "print", "(", "stars", ")", "# Interpolation for the decision rule", "dr", ".", "set_values", "(", "x_0", ")", "return", "dr" ]
Finds a global solution for ``model`` using parameterized expectations function. Requires the model to be written with controls as a direct function of the model objects. The algorithm iterates on the expectations function in the arbitrage equation. It follows the discussion in section 9.9 of Miranda and Fackler (2002). Parameters ---------- model : NumericModel "dtcscc" model to be solved verbose : boolean if True, display iterations initial_dr : decision rule initial guess for the decision rule pert_order : {1} if no initial guess is supplied, the perturbation solution at order ``pert_order`` is used as initial guess grid: grid options distribution: distribution options maxit: maximum number of iterations tol: tolerance criterion for successive approximations Returns ------- decision rule : approximated solution
[ "Finds", "a", "global", "solution", "for", "model", "using", "parameterized", "expectations", "function", ".", "Requires", "the", "model", "to", "be", "written", "with", "controls", "as", "a", "direct", "function", "of", "the", "model", "objects", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/algos/dtcscc/time_iteration_2.py#L186-L312
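At its core the solver above is a successive-approximation loop on the expectation function h. A stripped-down sketch of just that loop, where the toy mapping T stands in for the interpolate/quadrature/direct-response step (all names are illustrative):

import numpy as np

grid = np.linspace(0.0, 1.0, 11)
g = np.sin(grid)

def T(h):
    # stand-in for: interpolate h, integrate over shocks, apply direct response
    return 0.5 * h + g

h_0 = np.zeros_like(grid)       # initial guess for the expectation function
tol, maxit = 1e-8, 100
err, it = 10.0, 0
while err > tol and it <= maxit:
    it += 1
    h_new = T(h_0)
    err = abs(h_new - h_0).max()
    h_0 = h_new

print(it, err)                  # converges to the fixed point h* = 2*sin(grid)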
237,915
EconForge/dolo
dolo/compiler/misc.py
numdiff
def numdiff(fun, args): """Vectorized numerical differentiation""" # vectorized version epsilon = 1e-8 args = list(args) v0 = fun(*args) N = v0.shape[0] l_v = len(v0) dvs = [] for i, a in enumerate(args): l_a = (a).shape[1] dv = numpy.zeros((N, l_v, l_a)) nargs = list(args) #.copy() for j in range(l_a): xx = args[i].copy() xx[:, j] += epsilon nargs[i] = xx dv[:, :, j] = (fun(*nargs) - v0) / epsilon dvs.append(dv) return [v0] + dvs
python
def numdiff(fun, args): """Vectorized numerical differentiation""" # vectorized version epsilon = 1e-8 args = list(args) v0 = fun(*args) N = v0.shape[0] l_v = len(v0) dvs = [] for i, a in enumerate(args): l_a = (a).shape[1] dv = numpy.zeros((N, l_v, l_a)) nargs = list(args) #.copy() for j in range(l_a): xx = args[i].copy() xx[:, j] += epsilon nargs[i] = xx dv[:, :, j] = (fun(*nargs) - v0) / epsilon dvs.append(dv) return [v0] + dvs
[ "def", "numdiff", "(", "fun", ",", "args", ")", ":", "# vectorized version", "epsilon", "=", "1e-8", "args", "=", "list", "(", "args", ")", "v0", "=", "fun", "(", "*", "args", ")", "N", "=", "v0", ".", "shape", "[", "0", "]", "l_v", "=", "len", "(", "v0", ")", "dvs", "=", "[", "]", "for", "i", ",", "a", "in", "enumerate", "(", "args", ")", ":", "l_a", "=", "(", "a", ")", ".", "shape", "[", "1", "]", "dv", "=", "numpy", ".", "zeros", "(", "(", "N", ",", "l_v", ",", "l_a", ")", ")", "nargs", "=", "list", "(", "args", ")", "#.copy()", "for", "j", "in", "range", "(", "l_a", ")", ":", "xx", "=", "args", "[", "i", "]", ".", "copy", "(", ")", "xx", "[", ":", ",", "j", "]", "+=", "epsilon", "nargs", "[", "i", "]", "=", "xx", "dv", "[", ":", ",", ":", ",", "j", "]", "=", "(", "fun", "(", "*", "nargs", ")", "-", "v0", ")", "/", "epsilon", "dvs", ".", "append", "(", "dv", ")", "return", "[", "v0", "]", "+", "dvs" ]
Vectorized numerical differentiation
[ "Vectorized", "numerical", "differentiation" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/misc.py#L97-L118
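For reference, the same forward-difference idea in a self-contained form, checked against analytic derivatives. One caveat on the original: `len(v0)` on a 2-D array counts rows, so the helper above appears to assume the output dimension equals N; the sketch below uses `v0.shape[1]` instead.

import numpy as np

def jacobian_fd(f, x, eps=1e-8):
    v0 = f(x)                            # shape (N, m)
    N, m = v0.shape
    J = np.zeros((N, m, x.shape[1]))
    for j in range(x.shape[1]):
        xx = x.copy()
        xx[:, j] += eps                  # bump one input column at a time
        J[:, :, j] = (f(xx) - v0) / eps
    return v0, J

f = lambda x: np.column_stack([x[:, 0] * x[:, 1], np.sin(x[:, 0])])
x = np.random.rand(5, 2)
v0, J = jacobian_fd(f, x)
assert np.allclose(J[:, 0, 0], x[:, 1], atol=1e-5)           # d(x0*x1)/dx0 = x1
assert np.allclose(J[:, 1, 0], np.cos(x[:, 0]), atol=1e-5)   # d(sin x0)/dx0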
237,916
EconForge/dolo
dolo/numeric/filters.py
bandpass_filter
def bandpass_filter(data, k, w1, w2): """ This function will apply a bandpass filter to data. It will be kth order and will select the band between w1 and w2. Parameters ---------- data: array, dtype=float The data you wish to filter k: number, int The order of approximation for the filter. A max value for this isdata.size/2 w1: number, float This is the lower bound for which frequencies will pass through. w2: number, float This is the upper bound for which frequencies will pass through. Returns ------- y: array, dtype=float The filtered data. """ data = np.asarray(data) low_w = np.pi * 2 / w2 high_w = np.pi * 2 / w1 bweights = np.zeros(2 * k + 1) bweights[k] = (high_w - low_w) / np.pi j = np.arange(1, int(k) + 1) weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j)) bweights[k + j] = weights bweights[:k] = weights[::-1] bweights -= bweights.mean() return fftconvolve(bweights, data, mode='valid')
python
def bandpass_filter(data, k, w1, w2): """ This function will apply a bandpass filter to data. It will be kth order and will select the band between w1 and w2. Parameters ---------- data: array, dtype=float The data you wish to filter k: number, int The order of approximation for the filter. A max value for this isdata.size/2 w1: number, float This is the lower bound for which frequencies will pass through. w2: number, float This is the upper bound for which frequencies will pass through. Returns ------- y: array, dtype=float The filtered data. """ data = np.asarray(data) low_w = np.pi * 2 / w2 high_w = np.pi * 2 / w1 bweights = np.zeros(2 * k + 1) bweights[k] = (high_w - low_w) / np.pi j = np.arange(1, int(k) + 1) weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j)) bweights[k + j] = weights bweights[:k] = weights[::-1] bweights -= bweights.mean() return fftconvolve(bweights, data, mode='valid')
[ "def", "bandpass_filter", "(", "data", ",", "k", ",", "w1", ",", "w2", ")", ":", "data", "=", "np", ".", "asarray", "(", "data", ")", "low_w", "=", "np", ".", "pi", "*", "2", "/", "w2", "high_w", "=", "np", ".", "pi", "*", "2", "/", "w1", "bweights", "=", "np", ".", "zeros", "(", "2", "*", "k", "+", "1", ")", "bweights", "[", "k", "]", "=", "(", "high_w", "-", "low_w", ")", "/", "np", ".", "pi", "j", "=", "np", ".", "arange", "(", "1", ",", "int", "(", "k", ")", "+", "1", ")", "weights", "=", "1", "/", "(", "np", ".", "pi", "*", "j", ")", "*", "(", "sin", "(", "high_w", "*", "j", ")", "-", "sin", "(", "low_w", "*", "j", ")", ")", "bweights", "[", "k", "+", "j", "]", "=", "weights", "bweights", "[", ":", "k", "]", "=", "weights", "[", ":", ":", "-", "1", "]", "bweights", "-=", "bweights", ".", "mean", "(", ")", "return", "fftconvolve", "(", "bweights", ",", "data", ",", "mode", "=", "'valid'", ")" ]
This function will apply a bandpass filter to data. It will be kth order and will select the band between w1 and w2. Parameters ---------- data: array, dtype=float The data you wish to filter k: number, int The order of approximation for the filter. A max value for this is data.size/2 w1: number, float This is the lower bound for which frequencies will pass through. w2: number, float This is the upper bound for which frequencies will pass through. Returns ------- y: array, dtype=float The filtered data.
[ "This", "function", "will", "apply", "a", "bandpass", "filter", "to", "data", ".", "It", "will", "be", "kth", "order", "and", "will", "select", "the", "band", "between", "w1", "and", "w2", "." ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/filters.py#L83-L119
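A quick sanity check of the filter on synthetic data: keep a 32-period cycle, drop a 6-period one. The import path matches the record; the values of k, w1 and w2 are made up.

import numpy as np
from dolo.numeric.filters import bandpass_filter

t = np.arange(400)
data = np.sin(2 * np.pi * t / 32) + np.sin(2 * np.pi * t / 6)

filtered = bandpass_filter(data, 40, 16, 64)   # pass periods between 16 and 64
print(filtered.shape)   # (320,): 'valid' convolution trims k points per side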
237,917
EconForge/dolo
dolo/misc/dprint.py
dprint
def dprint(s): '''Prints `s` with additional debugging informations''' import inspect frameinfo = inspect.stack()[1] callerframe = frameinfo.frame d = callerframe.f_locals if (isinstance(s,str)): val = eval(s, d) else: val = s cc = frameinfo.code_context[0] import re regex = re.compile("dprint\((.*)\)") res = regex.search(cc) s = res.group(1) text = '' text += bcolors.OKBLUE + "At <{}>\n".format(str(frameinfo)) + bcolors.ENDC text += bcolors.WARNING + "{}: ".format(s) + bcolors.ENDC text += str(val) text += str() print(text)
python
def dprint(s): '''Prints `s` with additional debugging informations''' import inspect frameinfo = inspect.stack()[1] callerframe = frameinfo.frame d = callerframe.f_locals if (isinstance(s,str)): val = eval(s, d) else: val = s cc = frameinfo.code_context[0] import re regex = re.compile("dprint\((.*)\)") res = regex.search(cc) s = res.group(1) text = '' text += bcolors.OKBLUE + "At <{}>\n".format(str(frameinfo)) + bcolors.ENDC text += bcolors.WARNING + "{}: ".format(s) + bcolors.ENDC text += str(val) text += str() print(text)
[ "def", "dprint", "(", "s", ")", ":", "import", "inspect", "frameinfo", "=", "inspect", ".", "stack", "(", ")", "[", "1", "]", "callerframe", "=", "frameinfo", ".", "frame", "d", "=", "callerframe", ".", "f_locals", "if", "(", "isinstance", "(", "s", ",", "str", ")", ")", ":", "val", "=", "eval", "(", "s", ",", "d", ")", "else", ":", "val", "=", "s", "cc", "=", "frameinfo", ".", "code_context", "[", "0", "]", "import", "re", "regex", "=", "re", ".", "compile", "(", "\"dprint\\((.*)\\)\"", ")", "res", "=", "regex", ".", "search", "(", "cc", ")", "s", "=", "res", ".", "group", "(", "1", ")", "text", "=", "''", "text", "+=", "bcolors", ".", "OKBLUE", "+", "\"At <{}>\\n\"", ".", "format", "(", "str", "(", "frameinfo", ")", ")", "+", "bcolors", ".", "ENDC", "text", "+=", "bcolors", ".", "WARNING", "+", "\"{}: \"", ".", "format", "(", "s", ")", "+", "bcolors", ".", "ENDC", "text", "+=", "str", "(", "val", ")", "text", "+=", "str", "(", ")", "print", "(", "text", ")" ]
Prints `s` with additional debugging information
[ "Prints", "s", "with", "additional", "debugging", "information" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/misc/dprint.py#L21-L46
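Intended call pattern, assuming dprint and its module-level bcolors helper are importable and the call sits in a source file (it regex-matches its own call in the caller's code_context, so it cannot recover the argument text in a bare REPL):

x = [1, 2, 3]
dprint("len(x)")   # string arguments are eval'd in the caller's locals
dprint(x)          # non-strings are printed as-is; the label comes from the call text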
237,918
EconForge/dolo
dolo/compiler/function_compiler_sympy.py
non_decreasing_series
def non_decreasing_series(n, size): '''Lists all combinations of 0,...,n-1 in increasing order''' if size == 1: return [[a] for a in range(n)] else: lc = non_decreasing_series(n, size-1) ll = [] for l in lc: last = l[-1] for i in range(last, n): e = l + [i] ll.append(e) return ll
python
def non_decreasing_series(n, size): '''Lists all combinations of 0,...,n-1 in increasing order''' if size == 1: return [[a] for a in range(n)] else: lc = non_decreasing_series(n, size-1) ll = [] for l in lc: last = l[-1] for i in range(last, n): e = l + [i] ll.append(e) return ll
[ "def", "non_decreasing_series", "(", "n", ",", "size", ")", ":", "if", "size", "==", "1", ":", "return", "[", "[", "a", "]", "for", "a", "in", "range", "(", "n", ")", "]", "else", ":", "lc", "=", "non_decreasing_series", "(", "n", ",", "size", "-", "1", ")", "ll", "=", "[", "]", "for", "l", "in", "lc", ":", "last", "=", "l", "[", "-", "1", "]", "for", "i", "in", "range", "(", "last", ",", "n", ")", ":", "e", "=", "l", "+", "[", "i", "]", "ll", ".", "append", "(", "e", ")", "return", "ll" ]
Lists all combinations of 0,...,n-1 in non-decreasing order
[ "Lists", "all", "combinations", "of", "0", "...", "n", "-", "1", "in", "non", "-", "decreasing", "order" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/function_compiler_sympy.py#L13-L26
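The enumeration coincides, ordering included, with itertools.combinations_with_replacement over the index range, which gives a convenient cross-check (assumes the function above is in scope):

from itertools import combinations_with_replacement

expected = [list(c) for c in combinations_with_replacement(range(3), 2)]
assert non_decreasing_series(3, 2) == expected
# [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 2]]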
237,919
EconForge/dolo
dolo/compiler/function_compiler_sympy.py
higher_order_diff
def higher_order_diff(eqs, syms, order=2): '''Takes higher order derivatives of a list of equations w.r.t a list of paramters''' import numpy eqs = list([sympy.sympify(eq) for eq in eqs]) syms = list([sympy.sympify(s) for s in syms]) neq = len(eqs) p = len(syms) D = [numpy.array(eqs)] orders = [] for i in range(1,order+1): par = D[i-1] mat = numpy.empty([neq] + [p]*i, dtype=object) #.append( numpy.zeros(orders)) for ind in non_decreasing_series(p,i): ind_parent = ind[:-1] k = ind[-1] for line in range(neq): ii = [line] + ind iid = [line] + ind_parent eeq = par[ tuple(iid) ] mat[tuple(ii)] = eeq.diff(syms[k]) D.append(mat) return D
python
def higher_order_diff(eqs, syms, order=2): '''Takes higher order derivatives of a list of equations w.r.t a list of paramters''' import numpy eqs = list([sympy.sympify(eq) for eq in eqs]) syms = list([sympy.sympify(s) for s in syms]) neq = len(eqs) p = len(syms) D = [numpy.array(eqs)] orders = [] for i in range(1,order+1): par = D[i-1] mat = numpy.empty([neq] + [p]*i, dtype=object) #.append( numpy.zeros(orders)) for ind in non_decreasing_series(p,i): ind_parent = ind[:-1] k = ind[-1] for line in range(neq): ii = [line] + ind iid = [line] + ind_parent eeq = par[ tuple(iid) ] mat[tuple(ii)] = eeq.diff(syms[k]) D.append(mat) return D
[ "def", "higher_order_diff", "(", "eqs", ",", "syms", ",", "order", "=", "2", ")", ":", "import", "numpy", "eqs", "=", "list", "(", "[", "sympy", ".", "sympify", "(", "eq", ")", "for", "eq", "in", "eqs", "]", ")", "syms", "=", "list", "(", "[", "sympy", ".", "sympify", "(", "s", ")", "for", "s", "in", "syms", "]", ")", "neq", "=", "len", "(", "eqs", ")", "p", "=", "len", "(", "syms", ")", "D", "=", "[", "numpy", ".", "array", "(", "eqs", ")", "]", "orders", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "order", "+", "1", ")", ":", "par", "=", "D", "[", "i", "-", "1", "]", "mat", "=", "numpy", ".", "empty", "(", "[", "neq", "]", "+", "[", "p", "]", "*", "i", ",", "dtype", "=", "object", ")", "#.append( numpy.zeros(orders))", "for", "ind", "in", "non_decreasing_series", "(", "p", ",", "i", ")", ":", "ind_parent", "=", "ind", "[", ":", "-", "1", "]", "k", "=", "ind", "[", "-", "1", "]", "for", "line", "in", "range", "(", "neq", ")", ":", "ii", "=", "[", "line", "]", "+", "ind", "iid", "=", "[", "line", "]", "+", "ind_parent", "eeq", "=", "par", "[", "tuple", "(", "iid", ")", "]", "mat", "[", "tuple", "(", "ii", ")", "]", "=", "eeq", ".", "diff", "(", "syms", "[", "k", "]", ")", "D", ".", "append", "(", "mat", ")", "return", "D" ]
Takes higher order derivatives of a list of equations w.r.t. a list of parameters
[ "Takes", "higher", "order", "derivatives", "of", "a", "list", "of", "equations", "w", ".", "r", ".", "t", ".", "a", "list", "of", "parameters" ]
d91ddf148b009bf79852d9aec70f3a1877e0f79a
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/function_compiler_sympy.py#L28-L60
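A small end-to-end run, assuming the function above and sympy are in scope: one equation in two symbols, differentiated to second order. Only index tuples whose trailing indices are non-decreasing get filled; the symmetric slots stay None.

D = higher_order_diff(['x**2 * y'], ['x', 'y'], order=2)

print(D[0][0])        # x**2*y
print(D[1][0, 0])     # 2*x*y   (d/dx)
print(D[1][0, 1])     # x**2    (d/dy)
print(D[2][0, 0, 1])  # 2*x     (d2/dx dy)
print(D[2][0, 1, 0])  # None    (symmetric slot, not filled)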
237,920
pokerregion/poker
poker/website/pocketfives.py
get_ranked_players
def get_ranked_players(): """Get the list of the first 100 ranked players.""" rankings_page = requests.get(RANKINGS_URL) root = etree.HTML(rankings_page.text) player_rows = root.xpath('//div[@id="ranked"]//tr') for row in player_rows[1:]: player_row = row.xpath('td[@class!="country"]//text()') yield _Player( name=player_row[1], country=row[1][0].get('title'), triple_crowns=player_row[3], monthly_win=player_row[4], biggest_cash=player_row[5], plb_score=player_row[6], biggest_score=player_row[7], average_score=player_row[8], previous_rank=player_row[9], )
python
def get_ranked_players(): """Get the list of the first 100 ranked players.""" rankings_page = requests.get(RANKINGS_URL) root = etree.HTML(rankings_page.text) player_rows = root.xpath('//div[@id="ranked"]//tr') for row in player_rows[1:]: player_row = row.xpath('td[@class!="country"]//text()') yield _Player( name=player_row[1], country=row[1][0].get('title'), triple_crowns=player_row[3], monthly_win=player_row[4], biggest_cash=player_row[5], plb_score=player_row[6], biggest_score=player_row[7], average_score=player_row[8], previous_rank=player_row[9], )
[ "def", "get_ranked_players", "(", ")", ":", "rankings_page", "=", "requests", ".", "get", "(", "RANKINGS_URL", ")", "root", "=", "etree", ".", "HTML", "(", "rankings_page", ".", "text", ")", "player_rows", "=", "root", ".", "xpath", "(", "'//div[@id=\"ranked\"]//tr'", ")", "for", "row", "in", "player_rows", "[", "1", ":", "]", ":", "player_row", "=", "row", ".", "xpath", "(", "'td[@class!=\"country\"]//text()'", ")", "yield", "_Player", "(", "name", "=", "player_row", "[", "1", "]", ",", "country", "=", "row", "[", "1", "]", "[", "0", "]", ".", "get", "(", "'title'", ")", ",", "triple_crowns", "=", "player_row", "[", "3", "]", ",", "monthly_win", "=", "player_row", "[", "4", "]", ",", "biggest_cash", "=", "player_row", "[", "5", "]", ",", "plb_score", "=", "player_row", "[", "6", "]", ",", "biggest_score", "=", "player_row", "[", "7", "]", ",", "average_score", "=", "player_row", "[", "8", "]", ",", "previous_rank", "=", "player_row", "[", "9", "]", ",", ")" ]
Get the list of the first 100 ranked players.
[ "Get", "the", "list", "of", "the", "first", "100", "ranked", "players", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/pocketfives.py#L31-L50
237,921
pokerregion/poker
poker/card.py
Rank.difference
def difference(cls, first, second): """Tells the numerical difference between two ranks.""" # so we always get a Rank instance even if string were passed in first, second = cls(first), cls(second) rank_list = list(cls) return abs(rank_list.index(first) - rank_list.index(second))
python
def difference(cls, first, second): """Tells the numerical difference between two ranks.""" # so we always get a Rank instance even if string were passed in first, second = cls(first), cls(second) rank_list = list(cls) return abs(rank_list.index(first) - rank_list.index(second))
[ "def", "difference", "(", "cls", ",", "first", ",", "second", ")", ":", "# so we always get a Rank instance even if string were passed in", "first", ",", "second", "=", "cls", "(", "first", ")", ",", "cls", "(", "second", ")", "rank_list", "=", "list", "(", "cls", ")", "return", "abs", "(", "rank_list", ".", "index", "(", "first", ")", "-", "rank_list", ".", "index", "(", "second", ")", ")" ]
Tells the numerical difference between two ranks.
[ "Tells", "the", "numerical", "difference", "between", "two", "ranks", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/card.py#L42-L48
237,922
pokerregion/poker
poker/card.py
_CardMeta.make_random
def make_random(cls): """Returns a random Card instance.""" self = object.__new__(cls) self.rank = Rank.make_random() self.suit = Suit.make_random() return self
python
def make_random(cls): """Returns a random Card instance.""" self = object.__new__(cls) self.rank = Rank.make_random() self.suit = Suit.make_random() return self
[ "def", "make_random", "(", "cls", ")", ":", "self", "=", "object", ".", "__new__", "(", "cls", ")", "self", ".", "rank", "=", "Rank", ".", "make_random", "(", ")", "self", ".", "suit", "=", "Suit", ".", "make_random", "(", ")", "return", "self" ]
Returns a random Card instance.
[ "Returns", "a", "random", "Card", "instance", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/card.py#L64-L69
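The trick above is object.__new__(cls), which allocates an instance without running __init__, after which attributes are set directly. A generic stand-alone sketch of the same pattern:

import random

class Point:
    def __init__(self, x, y):
        raise RuntimeError("normal construction disabled for the demo")

    @classmethod
    def make_random(cls):
        self = object.__new__(cls)   # bypasses __init__ entirely
        self.x = random.random()
        self.y = random.random()
        return self

p = Point.make_random()
print(p.x, p.y)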
237,923
pokerregion/poker
poker/commands.py
twoplustwo_player
def twoplustwo_player(username): """Get profile information about a Two plus Two Forum member given the username.""" from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError try: member = ForumMember(username) except UserNotFoundError: raise click.ClickException('User "%s" not found!' % username) except AmbiguousUserNameError as e: click.echo('Got multiple users with similar names!', err=True) for ind, user in enumerate(e.users): click.echo('{}. {}'.format(ind + 1, user.name), err=True) number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)), prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True) userid = e.users[int(number) - 1].id member = ForumMember.from_userid(userid) click.echo(err=True) # empty line after input _print_header('Two plus two forum member') _print_values( ('Username', member.username), ('Forum id', member.id), ('Location', member.location), ('Total posts', member.total_posts), ('Posts per day', member.posts_per_day), ('Rank', member.rank), ('Last activity', member.last_activity), ('Join date', member.join_date), ('Usergroups', member.public_usergroups), ('Profile picture', member.profile_picture), ('Avatar', member.avatar), )
python
def twoplustwo_player(username): """Get profile information about a Two plus Two Forum member given the username.""" from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError try: member = ForumMember(username) except UserNotFoundError: raise click.ClickException('User "%s" not found!' % username) except AmbiguousUserNameError as e: click.echo('Got multiple users with similar names!', err=True) for ind, user in enumerate(e.users): click.echo('{}. {}'.format(ind + 1, user.name), err=True) number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)), prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True) userid = e.users[int(number) - 1].id member = ForumMember.from_userid(userid) click.echo(err=True) # empty line after input _print_header('Two plus two forum member') _print_values( ('Username', member.username), ('Forum id', member.id), ('Location', member.location), ('Total posts', member.total_posts), ('Posts per day', member.posts_per_day), ('Rank', member.rank), ('Last activity', member.last_activity), ('Join date', member.join_date), ('Usergroups', member.public_usergroups), ('Profile picture', member.profile_picture), ('Avatar', member.avatar), )
[ "def", "twoplustwo_player", "(", "username", ")", ":", "from", ".", "website", ".", "twoplustwo", "import", "ForumMember", ",", "AmbiguousUserNameError", ",", "UserNotFoundError", "try", ":", "member", "=", "ForumMember", "(", "username", ")", "except", "UserNotFoundError", ":", "raise", "click", ".", "ClickException", "(", "'User \"%s\" not found!'", "%", "username", ")", "except", "AmbiguousUserNameError", "as", "e", ":", "click", ".", "echo", "(", "'Got multiple users with similar names!'", ",", "err", "=", "True", ")", "for", "ind", ",", "user", "in", "enumerate", "(", "e", ".", "users", ")", ":", "click", ".", "echo", "(", "'{}. {}'", ".", "format", "(", "ind", "+", "1", ",", "user", ".", "name", ")", ",", "err", "=", "True", ")", "number", "=", "click", ".", "prompt", "(", "'Which would you like to see [{}-{}]'", ".", "format", "(", "1", ",", "len", "(", "e", ".", "users", ")", ")", ",", "prompt_suffix", "=", "'? '", ",", "type", "=", "click", ".", "IntRange", "(", "1", ",", "len", "(", "e", ".", "users", ")", ")", ",", "err", "=", "True", ")", "userid", "=", "e", ".", "users", "[", "int", "(", "number", ")", "-", "1", "]", ".", "id", "member", "=", "ForumMember", ".", "from_userid", "(", "userid", ")", "click", ".", "echo", "(", "err", "=", "True", ")", "# empty line after input", "_print_header", "(", "'Two plus two forum member'", ")", "_print_values", "(", "(", "'Username'", ",", "member", ".", "username", ")", ",", "(", "'Forum id'", ",", "member", ".", "id", ")", ",", "(", "'Location'", ",", "member", ".", "location", ")", ",", "(", "'Total posts'", ",", "member", ".", "total_posts", ")", ",", "(", "'Posts per day'", ",", "member", ".", "posts_per_day", ")", ",", "(", "'Rank'", ",", "member", ".", "rank", ")", ",", "(", "'Last activity'", ",", "member", ".", "last_activity", ")", ",", "(", "'Join date'", ",", "member", ".", "join_date", ")", ",", "(", "'Usergroups'", ",", "member", ".", "public_usergroups", ")", ",", "(", "'Profile picture'", ",", "member", ".", "profile_picture", ")", ",", "(", "'Avatar'", ",", "member", ".", "avatar", ")", ",", ")" ]
Get profile information about a Two plus Two Forum member given the username.
[ "Get", "profile", "information", "about", "a", "Two", "plus", "Two", "Forum", "member", "given", "the", "username", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L59-L95
237,924
pokerregion/poker
poker/commands.py
p5list
def p5list(num): """List pocketfives ranked players, max 100 if no NUM, or NUM if specified.""" from .website.pocketfives import get_ranked_players format_str = '{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'\ '{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}' click.echo(format_str.format( 'Rank' , 'Player name', 'Country', 'Triple', 'Monthly', 'Biggest cash', 'PLB score', 'Biggest s', 'Average s', 'Prev' )) # just generate the appropriate number of underlines and cut them with format_str underlines = ['-' * 20] * 10 click.echo(format_str.format(*underlines)) for ind, player in enumerate(get_ranked_players()): click.echo(format_str.format(str(ind + 1) + '.', *player)) if ind == num - 1: break
python
def p5list(num): """List pocketfives ranked players, max 100 if no NUM, or NUM if specified.""" from .website.pocketfives import get_ranked_players format_str = '{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'\ '{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}' click.echo(format_str.format( 'Rank' , 'Player name', 'Country', 'Triple', 'Monthly', 'Biggest cash', 'PLB score', 'Biggest s', 'Average s', 'Prev' )) # just generate the appropriate number of underlines and cut them with format_str underlines = ['-' * 20] * 10 click.echo(format_str.format(*underlines)) for ind, player in enumerate(get_ranked_players()): click.echo(format_str.format(str(ind + 1) + '.', *player)) if ind == num - 1: break
[ "def", "p5list", "(", "num", ")", ":", "from", ".", "website", ".", "pocketfives", "import", "get_ranked_players", "format_str", "=", "'{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'", "'{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}'", "click", ".", "echo", "(", "format_str", ".", "format", "(", "'Rank'", ",", "'Player name'", ",", "'Country'", ",", "'Triple'", ",", "'Monthly'", ",", "'Biggest cash'", ",", "'PLB score'", ",", "'Biggest s'", ",", "'Average s'", ",", "'Prev'", ")", ")", "# just generate the appropriate number of underlines and cut them with format_str", "underlines", "=", "[", "'-'", "*", "20", "]", "*", "10", "click", ".", "echo", "(", "format_str", ".", "format", "(", "*", "underlines", ")", ")", "for", "ind", ",", "player", "in", "enumerate", "(", "get_ranked_players", "(", ")", ")", ":", "click", ".", "echo", "(", "format_str", ".", "format", "(", "str", "(", "ind", "+", "1", ")", "+", "'.'", ",", "*", "player", ")", ")", "if", "ind", "==", "num", "-", "1", ":", "break" ]
List pocketfives ranked players, max 100 if no NUM, or NUM if specified.
[ "List", "pocketfives", "ranked", "players", "max", "100", "if", "no", "NUM", "or", "NUM", "if", "specified", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L100-L119
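The column layout leans on a less-known str.format feature: for string conversions, the precision (.13) truncates while the width (<15) pads, so every field has a fixed width regardless of input length. In isolation:

row = '{!s:<15.13}{!s:<9.6}'
print(repr(row.format('a very long player name', 123456789)))
# 'a very long p  123456   '  -- truncated to 13/6 chars, padded to 15/9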
237,925
pokerregion/poker
poker/commands.py
psstatus
def psstatus(): """Shows PokerStars status such as number of players, tournaments.""" from .website.pokerstars import get_status _print_header('PokerStars status') status = get_status() _print_values( ('Info updated', status.updated), ('Tables', status.tables), ('Players', status.players), ('Active tournaments', status.active_tournaments), ('Total tournaments', status.total_tournaments), ('Clubs', status.clubs), ('Club members', status.club_members), ) site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}' click.echo('\nSite Tables Players Tournaments') click.echo('----------- ------ ------- -----------') for site in status.sites: click.echo(site_format_str.format(site))
python
def psstatus(): """Shows PokerStars status such as number of players, tournaments.""" from .website.pokerstars import get_status _print_header('PokerStars status') status = get_status() _print_values( ('Info updated', status.updated), ('Tables', status.tables), ('Players', status.players), ('Active tournaments', status.active_tournaments), ('Total tournaments', status.total_tournaments), ('Clubs', status.clubs), ('Club members', status.club_members), ) site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}' click.echo('\nSite Tables Players Tournaments') click.echo('----------- ------ ------- -----------') for site in status.sites: click.echo(site_format_str.format(site))
[ "def", "psstatus", "(", ")", ":", "from", ".", "website", ".", "pokerstars", "import", "get_status", "_print_header", "(", "'PokerStars status'", ")", "status", "=", "get_status", "(", ")", "_print_values", "(", "(", "'Info updated'", ",", "status", ".", "updated", ")", ",", "(", "'Tables'", ",", "status", ".", "tables", ")", ",", "(", "'Players'", ",", "status", ".", "players", ")", ",", "(", "'Active tournaments'", ",", "status", ".", "active_tournaments", ")", ",", "(", "'Total tournaments'", ",", "status", ".", "total_tournaments", ")", ",", "(", "'Clubs'", ",", "status", ".", "clubs", ")", ",", "(", "'Club members'", ",", "status", ".", "club_members", ")", ",", ")", "site_format_str", "=", "'{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'", "click", ".", "echo", "(", "'\\nSite Tables Players Tournaments'", ")", "click", ".", "echo", "(", "'----------- ------ ------- -----------'", ")", "for", "site", "in", "status", ".", "sites", ":", "click", ".", "echo", "(", "site_format_str", ".", "format", "(", "site", ")", ")" ]
Shows PokerStars status such as number of players, tournaments.
[ "Shows", "PokerStars", "status", "such", "as", "number", "of", "players", "tournaments", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L123-L145
237,926
pokerregion/poker
poker/room/pokerstars.py
Notes.notes
def notes(self): """Tuple of notes.""" return tuple(self._get_note_data(note) for note in self.root.iter('note'))
python
def notes(self): """Tuple of notes.""" return tuple(self._get_note_data(note) for note in self.root.iter('note'))
[ "def", "notes", "(", "self", ")", ":", "return", "tuple", "(", "self", ".", "_get_note_data", "(", "note", ")", "for", "note", "in", "self", ".", "root", ".", "iter", "(", "'note'", ")", ")" ]
Tuple of notes.
[ "Tuple", "of", "notes", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L335-L337
237,927
pokerregion/poker
poker/room/pokerstars.py
Notes.labels
def labels(self): """Tuple of labels.""" return tuple(_Label(label.get('id'), label.get('color'), label.text) for label in self.root.iter('label'))
python
def labels(self): """Tuple of labels.""" return tuple(_Label(label.get('id'), label.get('color'), label.text) for label in self.root.iter('label'))
[ "def", "labels", "(", "self", ")", ":", "return", "tuple", "(", "_Label", "(", "label", ".", "get", "(", "'id'", ")", ",", "label", ".", "get", "(", "'color'", ")", ",", "label", ".", "text", ")", "for", "label", "in", "self", ".", "root", ".", "iter", "(", "'label'", ")", ")" ]
Tuple of labels.
[ "Tuple", "of", "labels", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L340-L343
237,928
pokerregion/poker
poker/room/pokerstars.py
Notes.add_note
def add_note(self, player, text, label=None, update=None): """Add a note to the xml. If update param is None, it will be the current time.""" if label is not None and (label not in self.label_names): raise LabelNotFoundError('Invalid label: {}'.format(label)) if update is None: update = datetime.utcnow() # converted to timestamp, rounded to ones update = update.strftime('%s') label_id = self._get_label_id(label) new_note = etree.Element('note', player=player, label=label_id, update=update) new_note.text = text self.root.append(new_note)
python
def add_note(self, player, text, label=None, update=None): """Add a note to the xml. If update param is None, it will be the current time.""" if label is not None and (label not in self.label_names): raise LabelNotFoundError('Invalid label: {}'.format(label)) if update is None: update = datetime.utcnow() # converted to timestamp, rounded to ones update = update.strftime('%s') label_id = self._get_label_id(label) new_note = etree.Element('note', player=player, label=label_id, update=update) new_note.text = text self.root.append(new_note)
[ "def", "add_note", "(", "self", ",", "player", ",", "text", ",", "label", "=", "None", ",", "update", "=", "None", ")", ":", "if", "label", "is", "not", "None", "and", "(", "label", "not", "in", "self", ".", "label_names", ")", ":", "raise", "LabelNotFoundError", "(", "'Invalid label: {}'", ".", "format", "(", "label", ")", ")", "if", "update", "is", "None", ":", "update", "=", "datetime", ".", "utcnow", "(", ")", "# converted to timestamp, rounded to ones", "update", "=", "update", ".", "strftime", "(", "'%s'", ")", "label_id", "=", "self", ".", "_get_label_id", "(", "label", ")", "new_note", "=", "etree", ".", "Element", "(", "'note'", ",", "player", "=", "player", ",", "label", "=", "label_id", ",", "update", "=", "update", ")", "new_note", ".", "text", "=", "text", "self", ".", "root", ".", "append", "(", "new_note", ")" ]
Add a note to the xml. If update param is None, it will be the current time.
[ "Add", "a", "note", "to", "the", "xml", ".", "If", "update", "param", "is", "None", "it", "will", "be", "the", "current", "time", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L354-L365
237,929
pokerregion/poker
poker/room/pokerstars.py
Notes.append_note
def append_note(self, player, text): """Append text to an already existing note.""" note = self._find_note(player) note.text += text
python
def append_note(self, player, text): """Append text to an already existing note.""" note = self._find_note(player) note.text += text
[ "def", "append_note", "(", "self", ",", "player", ",", "text", ")", ":", "note", "=", "self", ".", "_find_note", "(", "player", ")", "note", ".", "text", "+=", "text" ]
Append text to an already existing note.
[ "Append", "text", "to", "an", "already", "existing", "note", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L367-L370
237,930
pokerregion/poker
poker/room/pokerstars.py
Notes.prepend_note
def prepend_note(self, player, text): """Prepend text to an already existing note.""" note = self._find_note(player) note.text = text + note.text
python
def prepend_note(self, player, text): """Prepend text to an already existing note.""" note = self._find_note(player) note.text = text + note.text
[ "def", "prepend_note", "(", "self", ",", "player", ",", "text", ")", ":", "note", "=", "self", ".", "_find_note", "(", "player", ")", "note", ".", "text", "=", "text", "+", "note", ".", "text" ]
Prepend text to an already existing note.
[ "Prepend", "text", "to", "an", "already", "existing", "note", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L372-L375
237,931
pokerregion/poker
poker/room/pokerstars.py
Notes.get_label
def get_label(self, name): """Find the label by name.""" label_tag = self._find_label(name) return _Label(label_tag.get('id'), label_tag.get('color'), label_tag.text)
python
def get_label(self, name): """Find the label by name.""" label_tag = self._find_label(name) return _Label(label_tag.get('id'), label_tag.get('color'), label_tag.text)
[ "def", "get_label", "(", "self", ",", "name", ")", ":", "label_tag", "=", "self", ".", "_find_label", "(", "name", ")", "return", "_Label", "(", "label_tag", ".", "get", "(", "'id'", ")", ",", "label_tag", ".", "get", "(", "'color'", ")", ",", "label_tag", ".", "text", ")" ]
Find the label by name.
[ "Find", "the", "label", "by", "name", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L412-L415
237,932
pokerregion/poker
poker/room/pokerstars.py
Notes.add_label
def add_label(self, name, color): """Add a new label. Its id will automatically be calculated.""" color_upper = color.upper() if not self._color_re.match(color_upper): raise ValueError('Invalid color: {}'.format(color)) labels_tag = self.root[0] last_id = int(labels_tag[-1].get('id')) new_id = str(last_id + 1) new_label = etree.Element('label', id=new_id, color=color_upper) new_label.text = name labels_tag.append(new_label)
python
def add_label(self, name, color): """Add a new label. Its id will automatically be calculated.""" color_upper = color.upper() if not self._color_re.match(color_upper): raise ValueError('Invalid color: {}'.format(color)) labels_tag = self.root[0] last_id = int(labels_tag[-1].get('id')) new_id = str(last_id + 1) new_label = etree.Element('label', id=new_id, color=color_upper) new_label.text = name labels_tag.append(new_label)
[ "def", "add_label", "(", "self", ",", "name", ",", "color", ")", ":", "color_upper", "=", "color", ".", "upper", "(", ")", "if", "not", "self", ".", "_color_re", ".", "match", "(", "color_upper", ")", ":", "raise", "ValueError", "(", "'Invalid color: {}'", ".", "format", "(", "color", ")", ")", "labels_tag", "=", "self", ".", "root", "[", "0", "]", "last_id", "=", "int", "(", "labels_tag", "[", "-", "1", "]", ".", "get", "(", "'id'", ")", ")", "new_id", "=", "str", "(", "last_id", "+", "1", ")", "new_label", "=", "etree", ".", "Element", "(", "'label'", ",", "id", "=", "new_id", ",", "color", "=", "color_upper", ")", "new_label", ".", "text", "=", "name", "labels_tag", ".", "append", "(", "new_label", ")" ]
Add a new label. Its id will automatically be calculated.
[ "Add", "a", "new", "label", ".", "Its", "id", "will", "automatically", "be", "calculated", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L417-L430
237,933
pokerregion/poker
poker/room/pokerstars.py
Notes.del_label
def del_label(self, name): """Delete a label by name.""" labels_tag = self.root[0] labels_tag.remove(self._find_label(name))
python
def del_label(self, name): """Delete a label by name.""" labels_tag = self.root[0] labels_tag.remove(self._find_label(name))
[ "def", "del_label", "(", "self", ",", "name", ")", ":", "labels_tag", "=", "self", ".", "root", "[", "0", "]", "labels_tag", ".", "remove", "(", "self", ".", "_find_label", "(", "name", ")", ")" ]
Delete a label by name.
[ "Delete", "a", "label", "by", "name", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L432-L435
237,934
pokerregion/poker
poker/room/pokerstars.py
Notes.save
def save(self, filename): """Save the note XML to a file.""" with open(filename, 'w') as fp: fp.write(str(self))
python
def save(self, filename): """Save the note XML to a file.""" with open(filename, 'w') as fp: fp.write(str(self))
[ "def", "save", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "str", "(", "self", ")", ")" ]
Save the note XML to a file.
[ "Save", "the", "note", "XML", "to", "a", "file", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L447-L450
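Taken together, the Notes helpers above edit a small XML document of labels and notes through lxml.etree. A minimal stand-alone sketch of that structure; the element and attribute names follow the methods, the content is made up:

from lxml import etree

root = etree.XML(
    '<notes version="1">'
    '<labels><label id="0" color="30DBFF">Fish</label></labels>'
    '</notes>'
)

note = etree.Element('note', player='Hero', label='0', update='1424044908')
note.text = 'calls too much'
root.append(note)

print(etree.tostring(root, pretty_print=True).decode())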
237,935
pokerregion/poker
poker/handhistory.py
_BaseHandHistory.board
def board(self): """Calculates board from flop, turn and river.""" board = [] if self.flop: board.extend(self.flop.cards) if self.turn: board.append(self.turn) if self.river: board.append(self.river) return tuple(board) if board else None
python
def board(self): """Calculates board from flop, turn and river.""" board = [] if self.flop: board.extend(self.flop.cards) if self.turn: board.append(self.turn) if self.river: board.append(self.river) return tuple(board) if board else None
[ "def", "board", "(", "self", ")", ":", "board", "=", "[", "]", "if", "self", ".", "flop", ":", "board", ".", "extend", "(", "self", ".", "flop", ".", "cards", ")", "if", "self", ".", "turn", ":", "board", ".", "append", "(", "self", ".", "turn", ")", "if", "self", ".", "river", ":", "board", ".", "append", "(", "self", ".", "river", ")", "return", "tuple", "(", "board", ")", "if", "board", "else", "None" ]
Calculates board from flop, turn and river.
[ "Calculates", "board", "from", "flop", "turn", "and", "river", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/handhistory.py#L167-L176
237,936
pokerregion/poker
poker/handhistory.py
_BaseHandHistory._parse_date
def _parse_date(self, date_string): """Parse the date_string and store it on self.date as a UTC datetime.""" date = datetime.strptime(date_string, self._DATE_FORMAT) self.date = self._TZ.localize(date).astimezone(pytz.UTC)
python
def _parse_date(self, date_string): """Parse the date_string and store it on self.date as a UTC datetime.""" date = datetime.strptime(date_string, self._DATE_FORMAT) self.date = self._TZ.localize(date).astimezone(pytz.UTC)
[ "def", "_parse_date", "(", "self", ",", "date_string", ")", ":", "date", "=", "datetime", ".", "strptime", "(", "date_string", ",", "self", ".", "_DATE_FORMAT", ")", "self", ".", "date", "=", "self", ".", "_TZ", ".", "localize", "(", "date", ")", ".", "astimezone", "(", "pytz", ".", "UTC", ")" ]
Parse the date_string and store it on self.date as a UTC datetime.
[ "Parse", "the", "date_string", "and", "store", "it", "on", "self", ".", "date", "as", "a", "UTC", "datetime", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/handhistory.py#L178-L181
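The localize-then-convert idiom above, stand-alone (the zone and format string are illustrative): pytz wants localize() on a naive datetime rather than tzinfo= in the constructor.

from datetime import datetime
import pytz

tz = pytz.timezone('US/Eastern')
naive = datetime.strptime('2013/10/04 13:53:27', '%Y/%m/%d %H:%M:%S')
aware = tz.localize(naive).astimezone(pytz.UTC)
print(aware)   # 2013-10-04 17:53:27+00:00 (EDT is UTC-4)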
237,937
pokerregion/poker
poker/handhistory.py
_SplittableHandHistoryMixin._split_raw
def _split_raw(self): """Split hand history by sections.""" self._splitted = self._split_re.split(self.raw) # search split locations (basically empty strings) self._sections = [ind for ind, elem in enumerate(self._splitted) if not elem]
python
def _split_raw(self): """Split hand history by sections.""" self._splitted = self._split_re.split(self.raw) # search split locations (basically empty strings) self._sections = [ind for ind, elem in enumerate(self._splitted) if not elem]
[ "def", "_split_raw", "(", "self", ")", ":", "self", ".", "_splitted", "=", "self", ".", "_split_re", ".", "split", "(", "self", ".", "raw", ")", "# search split locations (basically empty strings)", "self", ".", "_sections", "=", "[", "ind", "for", "ind", ",", "elem", "in", "enumerate", "(", "self", ".", "_splitted", ")", "if", "not", "elem", "]" ]
Split hand history by sections.
[ "Split", "hand", "history", "by", "sections", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/handhistory.py#L201-L206
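The point of keeping the empty strings is that re.split yields '' wherever the delimiter sits at a boundary, so the empty slots double as section markers. A tiny illustration with a made-up delimiter:

import re

split_re = re.compile(r'\n\n')
raw = '\n\nHOLE CARDS\nAs Kd\n\nSUMMARY\nHero won'
parts = split_re.split(raw)
print(parts)                                      # ['', 'HOLE CARDS\nAs Kd', 'SUMMARY\nHero won']
print([i for i, p in enumerate(parts) if not p])  # [0]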
237,938
pokerregion/poker
poker/website/twoplustwo.py
ForumMember._get_timezone
def _get_timezone(self, root): """Find timezone information at the bottom of the page.""" tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text hours = int(self._tz_re.search(tz_str).group(1)) return tzoffset(tz_str, hours * 60)
python
def _get_timezone(self, root): """Find timezone information at the bottom of the page.""" tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text hours = int(self._tz_re.search(tz_str).group(1)) return tzoffset(tz_str, hours * 60)
[ "def", "_get_timezone", "(", "self", ",", "root", ")", ":", "tz_str", "=", "root", ".", "xpath", "(", "'//div[@class=\"smallfont\" and @align=\"center\"]'", ")", "[", "0", "]", ".", "text", "hours", "=", "int", "(", "self", ".", "_tz_re", ".", "search", "(", "tz_str", ")", ".", "group", "(", "1", ")", ")", "return", "tzoffset", "(", "tz_str", ",", "hours", "*", "60", ")" ]
Find timezone information at the bottom of the page.
[ "Find", "timezone", "informatation", "on", "bottom", "of", "the", "page", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/twoplustwo.py#L125-L129
237,939
pokerregion/poker
poker/website/pokerstars.py
get_current_tournaments
def get_current_tournaments(): """Get the next 200 tournaments from pokerstars.""" schedule_page = requests.get(TOURNAMENTS_XML_URL) root = etree.XML(schedule_page.content) for tour in root.iter('{*}tournament'): yield _Tournament( start_date=tour.findtext('{*}start_date'), name=tour.findtext('{*}name'), game=tour.findtext('{*}game'), buyin=tour.findtext('{*}buy_in_fee'), players=tour.get('players') )
python
def get_current_tournaments(): """Get the next 200 tournaments from pokerstars.""" schedule_page = requests.get(TOURNAMENTS_XML_URL) root = etree.XML(schedule_page.content) for tour in root.iter('{*}tournament'): yield _Tournament( start_date=tour.findtext('{*}start_date'), name=tour.findtext('{*}name'), game=tour.findtext('{*}game'), buyin=tour.findtext('{*}buy_in_fee'), players=tour.get('players') )
[ "def", "get_current_tournaments", "(", ")", ":", "schedule_page", "=", "requests", ".", "get", "(", "TOURNAMENTS_XML_URL", ")", "root", "=", "etree", ".", "XML", "(", "schedule_page", ".", "content", ")", "for", "tour", "in", "root", ".", "iter", "(", "'{*}tournament'", ")", ":", "yield", "_Tournament", "(", "start_date", "=", "tour", ".", "findtext", "(", "'{*}start_date'", ")", ",", "name", "=", "tour", ".", "findtext", "(", "'{*}name'", ")", ",", "game", "=", "tour", ".", "findtext", "(", "'{*}game'", ")", ",", "buyin", "=", "tour", ".", "findtext", "(", "'{*}buy_in_fee'", ")", ",", "players", "=", "tour", ".", "get", "(", "'players'", ")", ")" ]
Get the next 200 tournaments from pokerstars.
[ "Get", "the", "next", "200", "tournaments", "from", "pokerstars", "." ]
2d8cf208fdf2b26bdc935972dcbe7a983a9e9768
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/pokerstars.py#L29-L42
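The '{*}' prefix is lxml's any-namespace wildcard; a self-contained sketch with a made-up feed snippet in the same shape:

from lxml import etree

# hypothetical XML in the shape the function iterates over
xml = b'<ts xmlns="urn:x"><tournament players="180">' \
      b'<start_date>2019/07/01 12:00</start_date><name>Hot 5.50</name>' \
      b'<game>NLHE</game><buy_in_fee>$5+$0.50</buy_in_fee></tournament></ts>'
root = etree.XML(xml)
for tour in root.iter('{*}tournament'):
    print(tour.findtext('{*}name'), tour.get('players'))  # Hot 5.50 180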
237,940
RKrahl/pytest-dependency
setup.py
_filter_file
def _filter_file(src, dest, subst): """Copy src to dest doing substitutions on the fly. """ substre = re.compile(r'\$(%s)' % '|'.join(subst.keys())) def repl(m): return subst[m.group(1)] with open(src, "rt") as sf, open(dest, "wt") as df: while True: l = sf.readline() if not l: break df.write(re.sub(substre, repl, l))
python
def _filter_file(src, dest, subst): """Copy src to dest doing substitutions on the fly. """ substre = re.compile(r'\$(%s)' % '|'.join(subst.keys())) def repl(m): return subst[m.group(1)] with open(src, "rt") as sf, open(dest, "wt") as df: while True: l = sf.readline() if not l: break df.write(re.sub(substre, repl, l))
[ "def", "_filter_file", "(", "src", ",", "dest", ",", "subst", ")", ":", "substre", "=", "re", ".", "compile", "(", "r'\\$(%s)'", "%", "'|'", ".", "join", "(", "subst", ".", "keys", "(", ")", ")", ")", "def", "repl", "(", "m", ")", ":", "return", "subst", "[", "m", ".", "group", "(", "1", ")", "]", "with", "open", "(", "src", ",", "\"rt\"", ")", "as", "sf", ",", "open", "(", "dest", ",", "\"wt\"", ")", "as", "df", ":", "while", "True", ":", "l", "=", "sf", ".", "readline", "(", ")", "if", "not", "l", ":", "break", "df", ".", "write", "(", "re", ".", "sub", "(", "substre", ",", "repl", ",", "l", ")", ")" ]
Copy src to dest doing substitutions on the fly.
[ "Copy", "src", "to", "dest", "doing", "substitutions", "on", "the", "fly", "." ]
7b7c10818266ec4b05c36c341cf84f05d7ab53ce
https://github.com/RKrahl/pytest-dependency/blob/7b7c10818266ec4b05c36c341cf84f05d7ab53ce/setup.py#L18-L29
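The substitution core in isolation, with hypothetical keys:

import re

# the compiled pattern becomes r'\$(NAME|VERSION)'
subst = {'NAME': 'pytest-dependency', 'VERSION': '0.4.0'}
substre = re.compile(r'\$(%s)' % '|'.join(subst.keys()))
line = 'package $NAME, release $VERSION'
print(substre.sub(lambda m: subst[m.group(1)], line))
# package pytest-dependency, release 0.4.0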
237,941
profusion/sgqlc
sgqlc/endpoint/base.py
BaseEndpoint._fixup_graphql_error
def _fixup_graphql_error(self, data): '''Given a possible GraphQL error payload, make sure it's in shape. This will ensure the given ``data`` is in the shape: .. code-block:: json {"errors": [{"message": "some string"}]} If ``errors`` is not an array, it will be made into a single element array, with the object in that format, with its string representation being the message. If an element of the ``errors`` array is not in the format, then it's converted to the format, with its string representation being the message. The input object is not changed, a copy is made if needed. :return: the given ``data`` formatted to the correct shape, a copy is made and returned if any fix up was needed. :rtype: dict ''' original_data = data errors = data.get('errors') original_errors = errors if not isinstance(errors, list): self.logger.warning('data["errors"] is not a list! Fix up data=%r', data) data = data.copy() data['errors'] = [{'message': str(errors)}] return data for i, error in enumerate(errors): if not isinstance(error, dict): self.logger.warning('Error #%d: is not a dict: %r. Fix up!', i, error) if data is original_data: data = data.copy() if errors is original_errors: errors = errors.copy() data['errors'] = errors errors[i] = {'message': str(error)} continue message = error.get('message') if not isinstance(message, str): if data is original_data: data = data.copy() if errors is original_errors: errors = errors.copy() data['errors'] = errors message = str(error) if message is None else str(message) error = error.copy() error['message'] = message errors[i] = error return data
python
def _fixup_graphql_error(self, data): '''Given a possible GraphQL error payload, make sure it's in shape. This will ensure the given ``data`` is in the shape: .. code-block:: json {"errors": [{"message": "some string"}]} If ``errors`` is not an array, it will be made into a single element array, with the object in that format, with its string representation being the message. If an element of the ``errors`` array is not in the format, then it's converted to the format, with its string representation being the message. The input object is not changed, a copy is made if needed. :return: the given ``data`` formatted to the correct shape, a copy is made and returned if any fix up was needed. :rtype: dict ''' original_data = data errors = data.get('errors') original_errors = errors if not isinstance(errors, list): self.logger.warning('data["errors"] is not a list! Fix up data=%r', data) data = data.copy() data['errors'] = [{'message': str(errors)}] return data for i, error in enumerate(errors): if not isinstance(error, dict): self.logger.warning('Error #%d: is not a dict: %r. Fix up!', i, error) if data is original_data: data = data.copy() if errors is original_errors: errors = errors.copy() data['errors'] = errors errors[i] = {'message': str(error)} continue message = error.get('message') if not isinstance(message, str): if data is original_data: data = data.copy() if errors is original_errors: errors = errors.copy() data['errors'] = errors message = str(error) if message is None else str(message) error = error.copy() error['message'] = message errors[i] = error return data
[ "def", "_fixup_graphql_error", "(", "self", ",", "data", ")", ":", "original_data", "=", "data", "errors", "=", "data", ".", "get", "(", "'errors'", ")", "original_errors", "=", "errors", "if", "not", "isinstance", "(", "errors", ",", "list", ")", ":", "self", ".", "logger", ".", "warning", "(", "'data[\"errors\"] is not a list! Fix up data=%r'", ",", "data", ")", "data", "=", "data", ".", "copy", "(", ")", "data", "[", "'errors'", "]", "=", "[", "{", "'message'", ":", "str", "(", "errors", ")", "}", "]", "return", "data", "for", "i", ",", "error", "in", "enumerate", "(", "errors", ")", ":", "if", "not", "isinstance", "(", "error", ",", "dict", ")", ":", "self", ".", "logger", ".", "warning", "(", "'Error #%d: is not a dict: %r. Fix up!'", ",", "i", ",", "error", ")", "if", "data", "is", "original_data", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "errors", "is", "original_errors", ":", "errors", "=", "errors", ".", "copy", "(", ")", "data", "[", "'errors'", "]", "=", "errors", "errors", "[", "i", "]", "=", "{", "'message'", ":", "str", "(", "error", ")", "}", "continue", "message", "=", "error", ".", "get", "(", "'message'", ")", "if", "not", "isinstance", "(", "message", ",", "str", ")", ":", "if", "data", "is", "original_data", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "errors", "is", "original_errors", ":", "errors", "=", "errors", ".", "copy", "(", ")", "data", "[", "'errors'", "]", "=", "errors", "message", "=", "str", "(", "error", ")", "if", "message", "is", "None", "else", "str", "(", "message", ")", "error", "=", "error", ".", "copy", "(", ")", "error", "[", "'message'", "]", "=", "message", "errors", "[", "i", "]", "=", "error", "return", "data" ]
Given a possible GraphQL error payload, make sure it's in shape. This will ensure the given ``data`` is in the shape: .. code-block:: json {"errors": [{"message": "some string"}]} If ``errors`` is not an array, it will be made into a single element array, with the object in that format, with its string representation being the message. If an element of the ``errors`` array is not in the format, then it's converted to the format, with its string representation being the message. The input object is not changed, a copy is made if needed. :return: the given ``data`` formatted to the correct shape, a copy is made and returned if any fix up was needed. :rtype: dict
[ "Given", "a", "possible", "GraphQL", "error", "payload", "make", "sure", "it", "s", "in", "shape", "." ]
684afb059c93f142150043cafac09b7fd52bfa27
https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/endpoint/base.py#L104-L163
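Two hypothetical malformed payloads and the shape the fix-up rules above produce (data shapes only, not a call into the class):

# errors is not a list -> wrapped into a single-element array
{'errors': 'boom'}
# becomes {'errors': [{'message': 'boom'}]}

# an element is not a dict / message is not a str -> stringified
{'errors': ['boom', {'message': 42}]}
# becomes {'errors': [{'message': 'boom'}, {'message': '42'}]}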
237,942
profusion/sgqlc
sgqlc/endpoint/base.py
BaseEndpoint.snippet
def snippet(code, locations, sep=' | ', colmark=('-', '^'), context=5): '''Given a code and list of locations, convert to snippet lines. Each returned line includes the line number, a separator (``sep``), then the line contents. At most ``context`` lines are shown before each location line. After each location line, the column is marked using ``colmark``. The first character is repeated up to column, the second character is used only once. :return: list of lines of sources or column markups. :rtype: list ''' if not locations: return [] lines = code.split('\n') offset = int(len(lines) / 10) + 1 linenofmt = '%{}d'.format(offset) s = [] for loc in locations: line = max(0, loc.get('line', 1) - 1) column = max(0, loc.get('column', 1) - 1) start_line = max(0, line - context) for i, ln in enumerate(lines[start_line:line + 1], start_line): s.append('{}{}{}'.format(linenofmt % i, sep, ln)) s.append('{}{}{}'.format(' ' * (offset + len(sep)), colmark[0] * column, colmark[1])) return s
python
def snippet(code, locations, sep=' | ', colmark=('-', '^'), context=5): '''Given a code and list of locations, convert to snippet lines. Each returned line includes the line number, a separator (``sep``), then the line contents. At most ``context`` lines are shown before each location line. After each location line, the column is marked using ``colmark``. The first character is repeated up to column, the second character is used only once. :return: list of lines of sources or column markups. :rtype: list ''' if not locations: return [] lines = code.split('\n') offset = int(len(lines) / 10) + 1 linenofmt = '%{}d'.format(offset) s = [] for loc in locations: line = max(0, loc.get('line', 1) - 1) column = max(0, loc.get('column', 1) - 1) start_line = max(0, line - context) for i, ln in enumerate(lines[start_line:line + 1], start_line): s.append('{}{}{}'.format(linenofmt % i, sep, ln)) s.append('{}{}{}'.format(' ' * (offset + len(sep)), colmark[0] * column, colmark[1])) return s
[ "def", "snippet", "(", "code", ",", "locations", ",", "sep", "=", "' | '", ",", "colmark", "=", "(", "'-'", ",", "'^'", ")", ",", "context", "=", "5", ")", ":", "if", "not", "locations", ":", "return", "[", "]", "lines", "=", "code", ".", "split", "(", "'\\n'", ")", "offset", "=", "int", "(", "len", "(", "lines", ")", "/", "10", ")", "+", "1", "linenofmt", "=", "'%{}d'", ".", "format", "(", "offset", ")", "s", "=", "[", "]", "for", "loc", "in", "locations", ":", "line", "=", "max", "(", "0", ",", "loc", ".", "get", "(", "'line'", ",", "1", ")", "-", "1", ")", "column", "=", "max", "(", "0", ",", "loc", ".", "get", "(", "'column'", ",", "1", ")", "-", "1", ")", "start_line", "=", "max", "(", "0", ",", "line", "-", "context", ")", "for", "i", ",", "ln", "in", "enumerate", "(", "lines", "[", "start_line", ":", "line", "+", "1", "]", ",", "start_line", ")", ":", "s", ".", "append", "(", "'{}{}{}'", ".", "format", "(", "linenofmt", "%", "i", ",", "sep", ",", "ln", ")", ")", "s", ".", "append", "(", "'{}{}{}'", ".", "format", "(", "' '", "*", "(", "offset", "+", "len", "(", "sep", ")", ")", ",", "colmark", "[", "0", "]", "*", "column", ",", "colmark", "[", "1", "]", ")", ")", "return", "s" ]
Given a code and list of locations, convert to snippet lines. Each returned line includes the line number, a separator (``sep``), then the line contents. At most ``context`` lines are shown before each location line. After each location line, the column is marked using ``colmark``. The first character is repeated up to column, the second character is used only once. :return: list of lines of sources or column markups. :rtype: list
[ "Given", "a", "code", "and", "list", "of", "locations", "convert", "to", "snippet", "lines", "." ]
684afb059c93f142150043cafac09b7fd52bfa27
https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/endpoint/base.py#L206-L236
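Assuming snippet() from above is in scope, a two-line GraphQL document with one (1-based) error location renders as:

code = 'query {\n  field\n}'
locations = [{'line': 2, 'column': 3}]
print('\n'.join(snippet(code, locations)))
# 0 | query {
# 1 |   field
#     --^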
237,943
profusion/sgqlc
sgqlc/types/__init__.py
_create_non_null_wrapper
def _create_non_null_wrapper(name, t): 'creates type wrapper for non-null of given type' def __new__(cls, json_data, selection_list=None): if json_data is None: raise ValueError(name + ' received null value') return t(json_data, selection_list) def __to_graphql_input__(value, indent=0, indent_string=' '): return t.__to_graphql_input__(value, indent, indent_string) return type(name, (t,), { '__new__': __new__, '_%s__auto_register' % name: False, '__to_graphql_input__': __to_graphql_input__, })
python
def _create_non_null_wrapper(name, t): 'creates type wrapper for non-null of given type' def __new__(cls, json_data, selection_list=None): if json_data is None: raise ValueError(name + ' received null value') return t(json_data, selection_list) def __to_graphql_input__(value, indent=0, indent_string=' '): return t.__to_graphql_input__(value, indent, indent_string) return type(name, (t,), { '__new__': __new__, '_%s__auto_register' % name: False, '__to_graphql_input__': __to_graphql_input__, })
[ "def", "_create_non_null_wrapper", "(", "name", ",", "t", ")", ":", "def", "__new__", "(", "cls", ",", "json_data", ",", "selection_list", "=", "None", ")", ":", "if", "json_data", "is", "None", ":", "raise", "ValueError", "(", "name", "+", "' received null value'", ")", "return", "t", "(", "json_data", ",", "selection_list", ")", "def", "__to_graphql_input__", "(", "value", ",", "indent", "=", "0", ",", "indent_string", "=", "' '", ")", ":", "return", "t", ".", "__to_graphql_input__", "(", "value", ",", "indent", ",", "indent_string", ")", "return", "type", "(", "name", ",", "(", "t", ",", ")", ",", "{", "'__new__'", ":", "__new__", ",", "'_%s__auto_register'", "%", "name", ":", "False", ",", "'__to_graphql_input__'", ":", "__to_graphql_input__", ",", "}", ")" ]
creates type wrapper for non-null of given type
[ "creates", "type", "wrapper", "for", "non", "-", "null", "of", "given", "type" ]
684afb059c93f142150043cafac09b7fd52bfa27
https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/types/__init__.py#L869-L883
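The same dynamic-type pattern boiled down to a standalone sketch (names hypothetical, wrapping plain int instead of a GraphQL scalar):

def non_null(name, t):
    def __new__(cls, value):
        if value is None:
            raise ValueError(name + ' received null value')
        return t(value)
    return type(name, (t,), {'__new__': __new__})

NonNullInt = non_null('NonNullInt', int)
print(NonNullInt(3))  # 3
# NonNullInt(None) raises ValueError: NonNullInt received null value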
237,944
profusion/sgqlc
sgqlc/types/__init__.py
_create_list_of_wrapper
def _create_list_of_wrapper(name, t): 'creates type wrapper for list of given type' def __new__(cls, json_data, selection_list=None): if json_data is None: return None return [t(v, selection_list) for v in json_data] def __to_graphql_input__(value, indent=0, indent_string=' '): r = [] for v in value: r.append(t.__to_graphql_input__(v, indent, indent_string)) return '[' + ', '.join(r) + ']' def __to_json_value__(value): if value is None: return None return [t.__to_json_value__(v) for v in value] return type(name, (t,), { '__new__': __new__, '_%s__auto_register' % name: False, '__to_graphql_input__': __to_graphql_input__, '__to_json_value__': __to_json_value__, })
python
def _create_list_of_wrapper(name, t): 'creates type wrapper for list of given type' def __new__(cls, json_data, selection_list=None): if json_data is None: return None return [t(v, selection_list) for v in json_data] def __to_graphql_input__(value, indent=0, indent_string=' '): r = [] for v in value: r.append(t.__to_graphql_input__(v, indent, indent_string)) return '[' + ', '.join(r) + ']' def __to_json_value__(value): if value is None: return None return [t.__to_json_value__(v) for v in value] return type(name, (t,), { '__new__': __new__, '_%s__auto_register' % name: False, '__to_graphql_input__': __to_graphql_input__, '__to_json_value__': __to_json_value__, })
[ "def", "_create_list_of_wrapper", "(", "name", ",", "t", ")", ":", "def", "__new__", "(", "cls", ",", "json_data", ",", "selection_list", "=", "None", ")", ":", "if", "json_data", "is", "None", ":", "return", "None", "return", "[", "t", "(", "v", ",", "selection_list", ")", "for", "v", "in", "json_data", "]", "def", "__to_graphql_input__", "(", "value", ",", "indent", "=", "0", ",", "indent_string", "=", "' '", ")", ":", "r", "=", "[", "]", "for", "v", "in", "value", ":", "r", ".", "append", "(", "t", ".", "__to_graphql_input__", "(", "v", ",", "indent", ",", "indent_string", ")", ")", "return", "'['", "+", "', '", ".", "join", "(", "r", ")", "+", "']'", "def", "__to_json_value__", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "return", "[", "t", ".", "__to_json_value__", "(", "v", ")", "for", "v", "in", "value", "]", "return", "type", "(", "name", ",", "(", "t", ",", ")", ",", "{", "'__new__'", ":", "__new__", ",", "'_%s__auto_register'", "%", "name", ":", "False", ",", "'__to_graphql_input__'", ":", "__to_graphql_input__", ",", "'__to_json_value__'", ":", "__to_json_value__", ",", "}", ")" ]
creates type wrapper for list of given type
[ "creates", "type", "wrapper", "for", "list", "of", "given", "type" ]
684afb059c93f142150043cafac09b7fd52bfa27
https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/types/__init__.py#L886-L909
237,945
profusion/sgqlc
sgqlc/endpoint/http.py
add_query_to_url
def add_query_to_url(url, extra_query): '''Adds an extra query to URL, returning the new URL. Extra query may be a dict or a list as returned by :func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`. ''' split = urllib.parse.urlsplit(url) merged_query = urllib.parse.parse_qsl(split.query) if isinstance(extra_query, dict): for k, v in extra_query.items(): if not isinstance(v, (tuple, list)): merged_query.append((k, v)) else: for cv in v: merged_query.append((k, cv)) else: merged_query.extend(extra_query) merged_split = urllib.parse.SplitResult( split.scheme, split.netloc, split.path, urllib.parse.urlencode(merged_query), split.fragment, ) return merged_split.geturl()
python
def add_query_to_url(url, extra_query): '''Adds an extra query to URL, returning the new URL. Extra query may be a dict or a list as returned by :func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`. ''' split = urllib.parse.urlsplit(url) merged_query = urllib.parse.parse_qsl(split.query) if isinstance(extra_query, dict): for k, v in extra_query.items(): if not isinstance(v, (tuple, list)): merged_query.append((k, v)) else: for cv in v: merged_query.append((k, cv)) else: merged_query.extend(extra_query) merged_split = urllib.parse.SplitResult( split.scheme, split.netloc, split.path, urllib.parse.urlencode(merged_query), split.fragment, ) return merged_split.geturl()
[ "def", "add_query_to_url", "(", "url", ",", "extra_query", ")", ":", "split", "=", "urllib", ".", "parse", ".", "urlsplit", "(", "url", ")", "merged_query", "=", "urllib", ".", "parse", ".", "parse_qsl", "(", "split", ".", "query", ")", "if", "isinstance", "(", "extra_query", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "extra_query", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", ":", "merged_query", ".", "append", "(", "(", "k", ",", "v", ")", ")", "else", ":", "for", "cv", "in", "v", ":", "merged_query", ".", "append", "(", "(", "k", ",", "cv", ")", ")", "else", ":", "merged_query", ".", "extend", "(", "extra_query", ")", "merged_split", "=", "urllib", ".", "parse", ".", "SplitResult", "(", "split", ".", "scheme", ",", "split", ".", "netloc", ",", "split", ".", "path", ",", "urllib", ".", "parse", ".", "urlencode", "(", "merged_query", ")", ",", "split", ".", "fragment", ",", ")", "return", "merged_split", ".", "geturl", "(", ")" ]
Adds an extra query to URL, returning the new URL. Extra query may be a dict or a list as returned by :func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
[ "Adds", "an", "extra", "query", "to", "URL", "returning", "the", "new", "URL", "." ]
684afb059c93f142150043cafac09b7fd52bfa27
https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/endpoint/http.py#L33-L59
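Assuming add_query_to_url from above is in scope: dict values that are lists expand into repeated keys, and the fragment survives:

url = 'https://server.com/graphql?token=abc#frag'
print(add_query_to_url(url, {'q': '1', 'tag': ['a', 'b']}))
# https://server.com/graphql?token=abc&q=1&tag=a&tag=b#frag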
237,946
profusion/sgqlc
sgqlc/types/relay.py
connection_args
def connection_args(*lst, **mapping): '''Returns the default parameters for connection. Extra parameters may be given as arguments, either as iterable positional tuples or as a mapping. By default, provides: - ``after: String`` - ``before: String`` - ``first: Int`` - ``last: Int`` ''' pd = ArgDict(*lst, **mapping) pd.setdefault('after', String) pd.setdefault('before', String) pd.setdefault('first', Int) pd.setdefault('last', Int) return pd
python
def connection_args(*lst, **mapping): '''Returns the default parameters for connection. Extra parameters may be given as arguments, either as iterable positional tuples or as a mapping. By default, provides: - ``after: String`` - ``before: String`` - ``first: Int`` - ``last: Int`` ''' pd = ArgDict(*lst, **mapping) pd.setdefault('after', String) pd.setdefault('before', String) pd.setdefault('first', Int) pd.setdefault('last', Int) return pd
[ "def", "connection_args", "(", "*", "lst", ",", "*", "*", "mapping", ")", ":", "pd", "=", "ArgDict", "(", "*", "lst", ",", "*", "*", "mapping", ")", "pd", ".", "setdefault", "(", "'after'", ",", "String", ")", "pd", ".", "setdefault", "(", "'before'", ",", "String", ")", "pd", ".", "setdefault", "(", "'first'", ",", "Int", ")", "pd", ".", "setdefault", "(", "'last'", ",", "Int", ")", "return", "pd" ]
Returns the default parameters for connection. Extra parameters may be given as arguments, either as iterable positional tuples or as a mapping. By default, provides: - ``after: String`` - ``before: String`` - ``first: Int`` - ``last: Int``
[ "Returns", "the", "default", "parameters", "for", "connection", "." ]
684afb059c93f142150043cafac09b7fd52bfa27
https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/types/relay.py#L406-L424
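A hedged usage sketch, assuming sgqlc is installed; the extra 'search' argument is hypothetical:

from sgqlc.types import String
from sgqlc.types.relay import connection_args

args = connection_args(search=String)
print(sorted(args))  # ['after', 'before', 'first', 'last', 'search']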
237,947
nchopin/particles
book/pmcmc/pmmh_lingauss_varying_scale.py
msjd
def msjd(theta): """Mean squared jumping distance. """ s = 0. for p in theta.dtype.names: s += np.sum(np.diff(theta[p], axis=0) ** 2) return s
python
def msjd(theta): """Mean squared jumping distance. """ s = 0. for p in theta.dtype.names: s += np.sum(np.diff(theta[p], axis=0) ** 2) return s
[ "def", "msjd", "(", "theta", ")", ":", "s", "=", "0.", "for", "p", "in", "theta", ".", "dtype", ".", "names", ":", "s", "+=", "np", ".", "sum", "(", "np", ".", "diff", "(", "theta", "[", "p", "]", ",", "axis", "=", "0", ")", "**", "2", ")", "return", "s" ]
Mean squared jumping distance.
[ "Mean", "squared", "jumping", "distance", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/book/pmcmc/pmmh_lingauss_varying_scale.py#L31-L37
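With msjd from above in scope (it lives in a book script, so copy it rather than import), squared jumps are summed over every field of the structured array:

import numpy as np

theta = np.zeros(3, dtype=[('mu', float), ('sigma', float)])
theta['mu'] = [0., 1., 1.]     # squared jumps: 1 + 0
theta['sigma'] = [2., 2., 4.]  # squared jumps: 0 + 4
print(msjd(theta))             # 5.0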
237,948
nchopin/particles
particles/smc_samplers.py
StaticModel.loglik
def loglik(self, theta, t=None): """ log-likelihood at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full log-likelihood is returned) Returns ------- l: float numpy.ndarray the N log-likelihood values """ if t is None: t = self.T - 1 l = np.zeros(shape=theta.shape[0]) for s in range(t + 1): l += self.logpyt(theta, s) return l
python
def loglik(self, theta, t=None): """ log-likelihood at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full log-likelihood is returned) Returns ------- l: float numpy.ndarray the N log-likelihood values """ if t is None: t = self.T - 1 l = np.zeros(shape=theta.shape[0]) for s in range(t + 1): l += self.logpyt(theta, s) return l
[ "def", "loglik", "(", "self", ",", "theta", ",", "t", "=", "None", ")", ":", "if", "t", "is", "None", ":", "t", "=", "self", ".", "T", "-", "1", "l", "=", "np", ".", "zeros", "(", "shape", "=", "theta", ".", "shape", "[", "0", "]", ")", "for", "s", "in", "range", "(", "t", "+", "1", ")", ":", "l", "+=", "self", ".", "logpyt", "(", "theta", ",", "s", ")", "return", "l" ]
log-likelihood at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full log-likelihood is returned) Returns ------- l: float numpy.ndarray the N log-likelihood values
[ "log", "-", "likelihood", "at", "given", "parameter", "values", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L91-L111
237,949
nchopin/particles
particles/smc_samplers.py
StaticModel.logpost
def logpost(self, theta, t=None): """Posterior log-density at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full posterior is returned) Returns ------- l: float numpy.ndarray the N posterior log-density values """ return self.prior.logpdf(theta) + self.loglik(theta, t)
python
def logpost(self, theta, t=None): """Posterior log-density at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full posterior is returned) Returns ------- l: float numpy.ndarray the N posterior log-density values """ return self.prior.logpdf(theta) + self.loglik(theta, t)
[ "def", "logpost", "(", "self", ",", "theta", ",", "t", "=", "None", ")", ":", "return", "self", ".", "prior", ".", "logpdf", "(", "theta", ")", "+", "self", ".", "loglik", "(", "theta", ",", "t", ")" ]
Posterior log-density at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full posterior is returned) Returns ------- l: float numpy.ndarray the N posterior log-density values
[ "Posterior", "log", "-", "density", "at", "given", "parameter", "values", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L113-L128
237,950
nchopin/particles
particles/smc_samplers.py
FancyList.copyto
def copyto(self, src, where=None): """ Same syntax and functionality as numpy.copyto """ for n, _ in enumerate(self.l): if where[n]: self.l[n] = src.l[n]
python
def copyto(self, src, where=None): """ Same syntax and functionality as numpy.copyto """ for n, _ in enumerate(self.l): if where[n]: self.l[n] = src.l[n]
[ "def", "copyto", "(", "self", ",", "src", ",", "where", "=", "None", ")", ":", "for", "n", ",", "_", "in", "enumerate", "(", "self", ".", "l", ")", ":", "if", "where", "[", "n", "]", ":", "self", ".", "l", "[", "n", "]", "=", "src", ".", "l", "[", "n", "]" ]
Same syntax and functionality as numpy.copyto
[ "Same", "syntax", "and", "functionality", "as", "numpy", ".", "copyto" ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L178-L185
237,951
nchopin/particles
particles/smc_samplers.py
ThetaParticles.copy
def copy(self): """Returns a copy of the object.""" attrs = {k: self.__dict__[k].copy() for k in self.containers} attrs.update({k: cp.deepcopy(self.__dict__[k]) for k in self.shared}) return self.__class__(**attrs)
python
def copy(self): """Returns a copy of the object.""" attrs = {k: self.__dict__[k].copy() for k in self.containers} attrs.update({k: cp.deepcopy(self.__dict__[k]) for k in self.shared}) return self.__class__(**attrs)
[ "def", "copy", "(", "self", ")", ":", "attrs", "=", "{", "k", ":", "self", ".", "__dict__", "[", "k", "]", ".", "copy", "(", ")", "for", "k", "in", "self", ".", "containers", "}", "attrs", ".", "update", "(", "{", "k", ":", "cp", ".", "deepcopy", "(", "self", ".", "__dict__", "[", "k", "]", ")", "for", "k", "in", "self", ".", "shared", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "attrs", ")" ]
Returns a copy of the object.
[ "Returns", "a", "copy", "of", "the", "object", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L245-L249
237,952
nchopin/particles
particles/smc_samplers.py
ThetaParticles.copyto
def copyto(self, src, where=None): """Emulates function `copyto` in NumPy. Parameters ---------- where: (N,) bool ndarray True if particle n in src must be copied. src: (N,) `ThetaParticles` object source for each n such that where[n] is True, copy particle n in src into self (at location n) """ for k in self.containers: v = self.__dict__[k] if isinstance(v, np.ndarray): np.copyto(v, src.__dict__[k], where=where) else: v.copyto(src.__dict__[k], where=where)
python
def copyto(self, src, where=None): """Emulates function `copyto` in NumPy. Parameters ---------- where: (N,) bool ndarray True if particle n in src must be copied. src: (N,) `ThetaParticles` object source for each n such that where[n] is True, copy particle n in src into self (at location n) """ for k in self.containers: v = self.__dict__[k] if isinstance(v, np.ndarray): np.copyto(v, src.__dict__[k], where=where) else: v.copyto(src.__dict__[k], where=where)
[ "def", "copyto", "(", "self", ",", "src", ",", "where", "=", "None", ")", ":", "for", "k", "in", "self", ".", "containers", ":", "v", "=", "self", ".", "__dict__", "[", "k", "]", "if", "isinstance", "(", "v", ",", "np", ".", "ndarray", ")", ":", "np", ".", "copyto", "(", "v", ",", "src", ".", "__dict__", "[", "k", "]", ",", "where", "=", "where", ")", "else", ":", "v", ".", "copyto", "(", "src", ".", "__dict__", "[", "k", "]", ",", "where", "=", "where", ")" ]
Emulates function `copyto` in NumPy. Parameters ---------- where: (N,) bool ndarray True if particle n in src must be copied. src: (N,) `ThetaParticles` object source for each n such that where[n] is True, copy particle n in src into self (at location n)
[ "Emulates", "function", "copyto", "in", "NumPy", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L251-L270
237,953
nchopin/particles
particles/smc_samplers.py
ThetaParticles.copyto_at
def copyto_at(self, n, src, m): """Copy to at a given location. Parameters ---------- n: int index where to copy src: `ThetaParticles` object source m: int index of the element to be copied Note ---- Basically, does self[n] <- src[m] """ for k in self.containers: self.__dict__[k][n] = src.__dict__[k][m]
python
def copyto_at(self, n, src, m): """Copy to at a given location. Parameters ---------- n: int index where to copy src: `ThetaParticles` object source m: int index of the element to be copied Note ---- Basically, does self[n] <- src[m] """ for k in self.containers: self.__dict__[k][n] = src.__dict__[k][m]
[ "def", "copyto_at", "(", "self", ",", "n", ",", "src", ",", "m", ")", ":", "for", "k", "in", "self", ".", "containers", ":", "self", ".", "__dict__", "[", "k", "]", "[", "n", "]", "=", "src", ".", "__dict__", "[", "k", "]", "[", "m", "]" ]
Copy to at a given location. Parameters ---------- n: int index where to copy src: `ThetaParticles` object source m: int index of the element to be copied Note ---- Basically, does self[n] <- src[m]
[ "Copy", "to", "at", "a", "given", "location", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L272-L289
237,954
nchopin/particles
particles/smc_samplers.py
MetroParticles.Metropolis
def Metropolis(self, compute_target, mh_options): """Performs a certain number of Metropolis steps. Parameters ---------- compute_target: function computes the target density for the proposed values mh_options: dict + 'type_prop': {'random_walk', 'independent'} type of proposal: either Gaussian random walk, or independent Gaussian + 'adaptive': bool If True, the covariance matrix of the random walk proposal is set to `rw_scale` times the weighted cov matrix of the particle sample (ignored if proposal is independent) + 'rw_scale': float (default=None) see above (ignored if proposal is independent) + 'indep_scale': float (default=1.1) for an independent proposal, the proposal distribution is Gaussian with mean set to the particle mean, cov set to `indep_scale` times particle covariance + 'nsteps': int (default: 0) number of steps; if 0, the number of steps is chosen adaptively as follows: we stop when the average distance between the starting points and the stopping points increases by less than a certain fraction + 'delta_dist': float (default: 0.1) threshold for when nsteps = 0 """ opts = mh_options.copy() nsteps = opts.pop('nsteps', 0) delta_dist = opts.pop('delta_dist', 0.1) proposal = self.choose_proposal(**opts) xout = self.copy() xp = self.__class__(theta=np.empty_like(self.theta)) step_ars = [] for _ in self.mcmc_iterate(nsteps, self.arr, xout.arr, delta_dist): xp.arr[:, :], delta_lp = proposal.step(xout.arr) compute_target(xp) lp_acc = xp.lpost - xout.lpost + delta_lp accept = (np.log(stats.uniform.rvs(size=self.N)) < lp_acc) xout.copyto(xp, where=accept) step_ars.append(np.mean(accept)) xout.acc_rates = self.acc_rates + [step_ars] return xout
python
def Metropolis(self, compute_target, mh_options): """Performs a certain number of Metropolis steps. Parameters ---------- compute_target: function computes the target density for the proposed values mh_options: dict + 'type_prop': {'random_walk', 'independent'} type of proposal: either Gaussian random walk, or independent Gaussian + 'adaptive': bool If True, the covariance matrix of the random walk proposal is set to `rw_scale` times the weighted cov matrix of the particle sample (ignored if proposal is independent) + 'rw_scale': float (default=None) see above (ignored if proposal is independent) + 'indep_scale': float (default=1.1) for an independent proposal, the proposal distribution is Gaussian with mean set to the particle mean, cov set to `indep_scale` times particle covariance + 'nsteps': int (default: 0) number of steps; if 0, the number of steps is chosen adaptively as follows: we stop when the average distance between the starting points and the stopping points increases by less than a certain fraction + 'delta_dist': float (default: 0.1) threshold for when nsteps = 0 """ opts = mh_options.copy() nsteps = opts.pop('nsteps', 0) delta_dist = opts.pop('delta_dist', 0.1) proposal = self.choose_proposal(**opts) xout = self.copy() xp = self.__class__(theta=np.empty_like(self.theta)) step_ars = [] for _ in self.mcmc_iterate(nsteps, self.arr, xout.arr, delta_dist): xp.arr[:, :], delta_lp = proposal.step(xout.arr) compute_target(xp) lp_acc = xp.lpost - xout.lpost + delta_lp accept = (np.log(stats.uniform.rvs(size=self.N)) < lp_acc) xout.copyto(xp, where=accept) step_ars.append(np.mean(accept)) xout.acc_rates = self.acc_rates + [step_ars] return xout
[ "def", "Metropolis", "(", "self", ",", "compute_target", ",", "mh_options", ")", ":", "opts", "=", "mh_options", ".", "copy", "(", ")", "nsteps", "=", "opts", ".", "pop", "(", "'nsteps'", ",", "0", ")", "delta_dist", "=", "opts", ".", "pop", "(", "'delta_dist'", ",", "0.1", ")", "proposal", "=", "self", ".", "choose_proposal", "(", "*", "*", "opts", ")", "xout", "=", "self", ".", "copy", "(", ")", "xp", "=", "self", ".", "__class__", "(", "theta", "=", "np", ".", "empty_like", "(", "self", ".", "theta", ")", ")", "step_ars", "=", "[", "]", "for", "_", "in", "self", ".", "mcmc_iterate", "(", "nsteps", ",", "self", ".", "arr", ",", "xout", ".", "arr", ",", "delta_dist", ")", ":", "xp", ".", "arr", "[", ":", ",", ":", "]", ",", "delta_lp", "=", "proposal", ".", "step", "(", "xout", ".", "arr", ")", "compute_target", "(", "xp", ")", "lp_acc", "=", "xp", ".", "lpost", "-", "xout", ".", "lpost", "+", "delta_lp", "accept", "=", "(", "np", ".", "log", "(", "stats", ".", "uniform", ".", "rvs", "(", "size", "=", "self", ".", "N", ")", ")", "<", "lp_acc", ")", "xout", ".", "copyto", "(", "xp", ",", "where", "=", "accept", ")", "step_ars", ".", "append", "(", "np", ".", "mean", "(", "accept", ")", ")", "xout", ".", "acc_rates", "=", "self", ".", "acc_rates", "+", "[", "step_ars", "]", "return", "xout" ]
Performs a certain number of Metropolis steps. Parameters ---------- compute_target: function computes the target density for the proposed values mh_options: dict + 'type_prop': {'random_walk', 'independent'} type of proposal: either Gaussian random walk, or independent Gaussian + 'adaptive': bool If True, the covariance matrix of the random walk proposal is set to `rw_scale` times the weighted cov matrix of the particle sample (ignored if proposal is independent) + 'rw_scale': float (default=None) see above (ignored if proposal is independent) + 'indep_scale': float (default=1.1) for an independent proposal, the proposal distribution is Gaussian with mean set to the particle mean, cov set to `indep_scale` times particle covariance + 'nsteps': int (default: 0) number of steps; if 0, the number of steps is chosen adaptively as follows: we stop when the average distance between the starting points and the stopping points increases by less than a certain fraction + 'delta_dist': float (default: 0.1) threshold for when nsteps = 0
[ "Performs", "a", "certain", "number", "of", "Metropolis", "steps", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L375-L418
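A standalone numpy sketch of the vectorized accept/reject step above, for N parallel chains targeting a standard normal with a symmetric random walk (so the proposal correction delta_lp vanishes):

import numpy as np
from scipy import stats

N, rw_scale = 1000, 0.5
x = np.zeros(N)
lpost = stats.norm.logpdf(x)
xp = x + rw_scale * np.random.randn(N)           # random-walk proposal
lp_acc = stats.norm.logpdf(xp) - lpost
accept = np.log(stats.uniform.rvs(size=N)) < lp_acc
x = np.where(accept, xp, x)                      # copyto-style update
print('acceptance rate: %.2f' % np.mean(accept))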
237,955
nchopin/particles
particles/hmm.py
BaumWelch.backward
def backward(self): """Backward recursion. Upon completion, the following list of length T is available: * smth: marginal smoothing probabilities Note ---- Performs the forward step in case it has not been performed before. """ if not self.filt: self.forward() self.smth = [self.filt[-1]] log_trans = np.log(self.hmm.trans_mat) ctg = np.zeros(self.hmm.dim) # cost to go (log-lik of y_{t+1:T} given x_t=k) for filt, next_ft in reversed(list(zip(self.filt[:-1], self.logft[1:]))): new_ctg = np.empty(self.hmm.dim) for k in range(self.hmm.dim): new_ctg[k] = rs.log_sum_exp(log_trans[k, :] + next_ft + ctg) ctg = new_ctg smth = rs.exp_and_normalise(np.log(filt) + ctg) self.smth.append(smth) self.smth.reverse()
python
def backward(self): """Backward recursion. Upon completion, the following list of length T is available: * smth: marginal smoothing probabilities Note ---- Performs the forward step in case it has not been performed before. """ if not self.filt: self.forward() self.smth = [self.filt[-1]] log_trans = np.log(self.hmm.trans_mat) ctg = np.zeros(self.hmm.dim) # cost to go (log-lik of y_{t+1:T} given x_t=k) for filt, next_ft in reversed(list(zip(self.filt[:-1], self.logft[1:]))): new_ctg = np.empty(self.hmm.dim) for k in range(self.hmm.dim): new_ctg[k] = rs.log_sum_exp(log_trans[k, :] + next_ft + ctg) ctg = new_ctg smth = rs.exp_and_normalise(np.log(filt) + ctg) self.smth.append(smth) self.smth.reverse()
[ "def", "backward", "(", "self", ")", ":", "if", "not", "self", ".", "filt", ":", "self", ".", "forward", "(", ")", "self", ".", "smth", "=", "[", "self", ".", "filt", "[", "-", "1", "]", "]", "log_trans", "=", "np", ".", "log", "(", "self", ".", "hmm", ".", "trans_mat", ")", "ctg", "=", "np", ".", "zeros", "(", "self", ".", "hmm", ".", "dim", ")", "# cost to go (log-lik of y_{t+1:T} given x_t=k)", "for", "filt", ",", "next_ft", "in", "reversed", "(", "list", "(", "zip", "(", "self", ".", "filt", "[", ":", "-", "1", "]", ",", "self", ".", "logft", "[", "1", ":", "]", ")", ")", ")", ":", "new_ctg", "=", "np", ".", "empty", "(", "self", ".", "hmm", ".", "dim", ")", "for", "k", "in", "range", "(", "self", ".", "hmm", ".", "dim", ")", ":", "new_ctg", "[", "k", "]", "=", "rs", ".", "log_sum_exp", "(", "log_trans", "[", "k", ",", ":", "]", "+", "next_ft", "+", "ctg", ")", "ctg", "=", "new_ctg", "smth", "=", "rs", ".", "exp_and_normalise", "(", "np", ".", "log", "(", "filt", ")", "+", "ctg", ")", "self", ".", "smth", ".", "append", "(", "smth", ")", "self", ".", "smth", ".", "reverse", "(", ")" ]
Backward recursion. Upon completion, the following list of length T is available: * smth: marginal smoothing probabilities Note ---- Performs the forward step in case it has not been performed before.
[ "Backward", "recursion", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/hmm.py#L215-L238
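One backward update in log space for a toy two-state chain, with scipy's logsumexp standing in for rs.log_sum_exp (the matrix form vectorizes the per-k loop above):

import numpy as np
from scipy.special import logsumexp

log_trans = np.log(np.array([[0.9, 0.1], [0.2, 0.8]]))
next_ft = np.log(np.array([0.5, 0.3]))  # log-density of y_{t+1} given x=k
ctg = np.zeros(2)                       # cost-to-go at time t+1
new_ctg = logsumexp(log_trans + next_ft + ctg, axis=1)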
237,956
nchopin/particles
particles/kalman.py
predict_step
def predict_step(F, covX, filt): """Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel. """ pred_mean = np.matmul(filt.mean, F.T) pred_cov = dotdot(F, filt.cov, F.T) + covX return MeanAndCov(mean=pred_mean, cov=pred_cov)
python
def predict_step(F, covX, filt): """Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel. """ pred_mean = np.matmul(filt.mean, F.T) pred_cov = dotdot(F, filt.cov, F.T) + covX return MeanAndCov(mean=pred_mean, cov=pred_cov)
[ "def", "predict_step", "(", "F", ",", "covX", ",", "filt", ")", ":", "pred_mean", "=", "np", ".", "matmul", "(", "filt", ".", "mean", ",", "F", ".", "T", ")", "pred_cov", "=", "dotdot", "(", "F", ",", "filt", ".", "cov", ",", "F", ".", "T", ")", "+", "covX", "return", "MeanAndCov", "(", "mean", "=", "pred_mean", ",", "cov", "=", "pred_cov", ")" ]
Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel.
[ "Predictive", "step", "of", "Kalman", "filter", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L163-L187
237,957
nchopin/particles
particles/kalman.py
filter_step
def filter_step(G, covY, pred, yt): """Filtering step of Kalman filter. Parameters ---------- G: (dy, dx) numpy array mean of Y_t | X_t is G * X_t covY: (dy, dy) numpy array covariance of Y_t | X_t pred: MeanAndCov object predictive distribution at time t Returns ------- filt: MeanAndCov object filtering distribution at time t logpyt: float log density of Y_t | Y_{0:t-1} """ # data prediction data_pred_mean = np.matmul(pred.mean, G.T) data_pred_cov = dotdot(G, pred.cov, G.T) + covY if covY.shape[0] == 1: logpyt = dists.Normal(loc=data_pred_mean, scale=np.sqrt(data_pred_cov)).logpdf(yt) else: logpyt = dists.MvNormal(loc=data_pred_mean, cov=data_pred_cov).logpdf(yt) # filter residual = yt - data_pred_mean gain = dotdot(pred.cov, G.T, inv(data_pred_cov)) filt_mean = pred.mean + np.matmul(residual, gain.T) filt_cov = pred.cov - dotdot(gain, G, pred.cov) return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt
python
def filter_step(G, covY, pred, yt): """Filtering step of Kalman filter. Parameters ---------- G: (dy, dx) numpy array mean of Y_t | X_t is G * X_t covY: (dy, dy) numpy array covariance of Y_t | X_t pred: MeanAndCov object predictive distribution at time t Returns ------- filt: MeanAndCov object filtering distribution at time t logpyt: float log density of Y_t | Y_{0:t-1} """ # data prediction data_pred_mean = np.matmul(pred.mean, G.T) data_pred_cov = dotdot(G, pred.cov, G.T) + covY if covY.shape[0] == 1: logpyt = dists.Normal(loc=data_pred_mean, scale=np.sqrt(data_pred_cov)).logpdf(yt) else: logpyt = dists.MvNormal(loc=data_pred_mean, cov=data_pred_cov).logpdf(yt) # filter residual = yt - data_pred_mean gain = dotdot(pred.cov, G.T, inv(data_pred_cov)) filt_mean = pred.mean + np.matmul(residual, gain.T) filt_cov = pred.cov - dotdot(gain, G, pred.cov) return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt
[ "def", "filter_step", "(", "G", ",", "covY", ",", "pred", ",", "yt", ")", ":", "# data prediction", "data_pred_mean", "=", "np", ".", "matmul", "(", "pred", ".", "mean", ",", "G", ".", "T", ")", "data_pred_cov", "=", "dotdot", "(", "G", ",", "pred", ".", "cov", ",", "G", ".", "T", ")", "+", "covY", "if", "covY", ".", "shape", "[", "0", "]", "==", "1", ":", "logpyt", "=", "dists", ".", "Normal", "(", "loc", "=", "data_pred_mean", ",", "scale", "=", "np", ".", "sqrt", "(", "data_pred_cov", ")", ")", ".", "logpdf", "(", "yt", ")", "else", ":", "logpyt", "=", "dists", ".", "MvNormal", "(", "loc", "=", "data_pred_mean", ",", "cov", "=", "data_pred_cov", ")", ".", "logpdf", "(", "yt", ")", "# filter", "residual", "=", "yt", "-", "data_pred_mean", "gain", "=", "dotdot", "(", "pred", ".", "cov", ",", "G", ".", "T", ",", "inv", "(", "data_pred_cov", ")", ")", "filt_mean", "=", "pred", ".", "mean", "+", "np", ".", "matmul", "(", "residual", ",", "gain", ".", "T", ")", "filt_cov", "=", "pred", ".", "cov", "-", "dotdot", "(", "gain", ",", "G", ",", "pred", ".", "cov", ")", "return", "MeanAndCov", "(", "mean", "=", "filt_mean", ",", "cov", "=", "filt_cov", ")", ",", "logpyt" ]
Filtering step of Kalman filter. Parameters ---------- G: (dy, dx) numpy array mean of Y_t | X_t is G * X_t covY: (dy, dy) numpy array covariance of Y_t | X_t pred: MeanAndCov object predictive distribution at time t Returns ------- filt: MeanAndCov object filtering distribution at time t logpyt: float log density of Y_t | Y_{0:t-1}
[ "Filtering", "step", "of", "Kalman", "filter", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L190-L223
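One predict/filter cycle on a scalar random walk, assuming MeanAndCov, predict_step and filter_step above are importable (e.g. from particles.kalman):

import numpy as np
from particles.kalman import MeanAndCov, predict_step, filter_step

F = G = np.eye(1)
covX, covY = np.array([[1.]]), np.array([[0.5]])
filt = MeanAndCov(mean=np.zeros(1), cov=np.eye(1))
pred = predict_step(F, covX, filt)                  # mean 0, var 2
filt, logpyt = filter_step(G, covY, pred, np.array([1.]))
print(filt.mean, filt.cov)                          # [0.8] [[0.4]]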
237,958
nchopin/particles
particles/kalman.py
MVLinearGauss.check_shapes
def check_shapes(self): """ Check all dimensions are correct. """ assert self.covX.shape == (self.dx, self.dx), error_msg assert self.covY.shape == (self.dy, self.dy), error_msg assert self.F.shape == (self.dx, self.dx), error_msg assert self.G.shape == (self.dy, self.dx), error_msg assert self.mu0.shape == (self.dx,), error_msg assert self.cov0.shape == (self.dx, self.dx), error_msg
python
def check_shapes(self): """ Check all dimensions are correct. """ assert self.covX.shape == (self.dx, self.dx), error_msg assert self.covY.shape == (self.dy, self.dy), error_msg assert self.F.shape == (self.dx, self.dx), error_msg assert self.G.shape == (self.dy, self.dx), error_msg assert self.mu0.shape == (self.dx,), error_msg assert self.cov0.shape == (self.dx, self.dx), error_msg
[ "def", "check_shapes", "(", "self", ")", ":", "assert", "self", ".", "covX", ".", "shape", "==", "(", "self", ".", "dx", ",", "self", ".", "dx", ")", ",", "error_msg", "assert", "self", ".", "covY", ".", "shape", "==", "(", "self", ".", "dy", ",", "self", ".", "dy", ")", ",", "error_msg", "assert", "self", ".", "F", ".", "shape", "==", "(", "self", ".", "dx", ",", "self", ".", "dx", ")", ",", "error_msg", "assert", "self", ".", "G", ".", "shape", "==", "(", "self", ".", "dy", ",", "self", ".", "dx", ")", ",", "error_msg", "assert", "self", ".", "mu0", ".", "shape", "==", "(", "self", ".", "dx", ",", ")", ",", "error_msg", "assert", "self", ".", "cov0", ".", "shape", "==", "(", "self", ".", "dx", ",", "self", ".", "dx", ")", ",", "error_msg" ]
Check all dimensions are correct.
[ "Check", "all", "dimensions", "are", "correct", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L326-L335
237,959
nchopin/particles
particles/qmc.py
sobol
def sobol(N, dim, scrambled=1): """ Sobol sequence. Parameters ---------- N : int length of sequence dim: int dimension scrambled: int which scrambling method to use: + 0: no scrambling + 1: Owen's scrambling + 2: Faure-Tezuka + 3: Owen + Faure-Tezuka Returns ------- (N, dim) numpy array. Notes ----- For scrambling, seed is set randomly. Fun fact: this venerable but playful piece of Fortran code occasionally returns numbers above 1. (i.e. for a very small number of seeds); when this happens we just start over (since the seed is randomly generated). """ while(True): seed = np.random.randint(2**32) out = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0) if (scrambled == 0) or ((out < 1.).all() and (out > 0.).all()): # no need to test if scrambled==0 return out
python
def sobol(N, dim, scrambled=1): """ Sobol sequence. Parameters ---------- N : int length of sequence dim: int dimension scrambled: int which scrambling method to use: + 0: no scrambling + 1: Owen's scrambling + 2: Faure-Tezuka + 3: Owen + Faure-Tezuka Returns ------- (N, dim) numpy array. Notes ----- For scrambling, seed is set randomly. Fun fact: this venerable but playful piece of Fortran code occasionally returns numbers above 1. (i.e. for a very small number of seeds); when this happens we just start over (since the seed is randomly generated). """ while(True): seed = np.random.randint(2**32) out = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0) if (scrambled == 0) or ((out < 1.).all() and (out > 0.).all()): # no need to test if scrambled==0 return out
[ "def", "sobol", "(", "N", ",", "dim", ",", "scrambled", "=", "1", ")", ":", "while", "(", "True", ")", ":", "seed", "=", "np", ".", "random", ".", "randint", "(", "2", "**", "32", ")", "out", "=", "lowdiscrepancy", ".", "sobol", "(", "N", ",", "dim", ",", "scrambled", ",", "seed", ",", "1", ",", "0", ")", "if", "(", "scrambled", "==", "0", ")", "or", "(", "(", "out", "<", "1.", ")", ".", "all", "(", ")", "and", "(", "out", ">", "0.", ")", ".", "all", "(", ")", ")", ":", "# no need to test if scrambled==0", "return", "out" ]
Sobol sequence. Parameters ---------- N : int length of sequence dim: int dimension scrambled: int which scrambling method to use: + 0: no scrambling + 1: Owen's scrambling + 2: Faure-Tezuka + 3: Owen + Faure-Tezuka Returns ------- (N, dim) numpy array. Notes ----- For scrambling, seed is set randomly. Fun fact: this venerable but playful piece of Fortran code occasionally returns numbers above 1. (i.e. for a very small number of seeds); when this happens we just start over (since the seed is randomly generated).
[ "Sobol", "sequence", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/qmc.py#L29-L64
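Hedged usage, assuming the compiled lowdiscrepancy extension behind particles.qmc is available:

from particles import qmc

u = qmc.sobol(256, 2, scrambled=1)
print(u.shape, (u > 0.).all() and (u < 1.).all())  # (256, 2) True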
237,960
nchopin/particles
particles/smoothing.py
smoothing_worker
def smoothing_worker(method=None, N=100, seed=None, fk=None, fk_info=None, add_func=None, log_gamma=None): """Generic worker for off-line smoothing algorithms. This worker may be used in conjunction with utils.multiplexer in order to run in parallel (and eventually compare) off-line smoothing algorithms. Parameters ---------- method: string ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC', 'two-filter_ON', 'two-filter_ON_prop', 'two-filter_ON2'] N: int number of particles seed: int random generator seed; if None, generator is not seeded fk: Feynman-Kac object The Feynman-Kac model for the forward filter fk_info: Feynman-Kac object (default=None) the Feynman-Kac model for the information filter; if None, set to the same Feynman-Kac model as fk, with data in reverse add_func: function, with signature (t, x, xf) additive function, at time t, for particles x=x_t and xf=x_{t+1} log_gamma: function log of function gamma (see book) Returns ------- a dict with fields: est: a ndarray of length T cpu_time """ T = fk.T if fk_info is None: fk_info = fk.__class__(ssm=fk.ssm, data=fk.data[::-1]) if seed: random.seed(seed) est = np.zeros(T - 1) if method=='FFBS_QMC': pf = particles.SQMC(fk=fk, N=N, store_history=True) else: pf = particles.SMC(fk=fk, N=N, store_history=True) tic = time.clock() pf.run() if method in ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC']: if method.startswith('FFBS_ON'): z = pf.hist.backward_sampling(N, linear_cost=(method == 'FFBS_ON')) else: z = pf.hist.backward_sampling_qmc(N) for t in range(T - 1): est[t] = np.mean(add_func(t, z[t], z[t + 1])) elif method in ['two-filter_ON2', 'two-filter_ON', 'two-filter_ON_prop']: infopf = particles.SMC(fk=fk_info, N=N, store_history=True) infopf.run() for t in range(T - 1): psi = lambda x, xf: add_func(t, x, xf) if method == 'two-filter_ON2': est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma) else: ti = T - 2 - t # t+1 for info filter if method == 'two-filter_ON_prop': modif_fwd = stats.norm.logpdf(pf.hist.X[t], loc=np.mean(infopf.hist.X[ti + 1]), scale=np.std(infopf.hist.X[ti + 1])) modif_info = stats.norm.logpdf(infopf.hist.X[ti], loc=np.mean(pf.hist.X[t + 1]), scale=np.std(pf.hist.X[t + 1])) else: modif_fwd, modif_info = None, None est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma, linear_cost=True, modif_forward=modif_fwd, modif_info=modif_info) else: print('no such method?') cpu_time = time.clock() - tic print(method + ' took %.2f s for N=%i' % (cpu_time, N)) return {'est': est, 'cpu': cpu_time}
python
def smoothing_worker(method=None, N=100, seed=None, fk=None, fk_info=None, add_func=None, log_gamma=None): """Generic worker for off-line smoothing algorithms. This worker may be used in conjunction with utils.multiplexer in order to run in parallel (and eventually compare) off-line smoothing algorithms. Parameters ---------- method: string ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC', 'two-filter_ON', 'two-filter_ON_prop', 'two-filter_ON2'] N: int number of particles seed: int random generator seed; if None, generator is not seeded fk: Feynman-Kac object The Feynman-Kac model for the forward filter fk_info: Feynman-Kac object (default=None) the Feynman-Kac model for the information filter; if None, set to the same Feynman-Kac model as fk, with data in reverse add_func: function, with signature (t, x, xf) additive function, at time t, for particles x=x_t and xf=x_{t+1} log_gamma: function log of function gamma (see book) Returns ------- a dict with fields: est: a ndarray of length T cpu_time """ T = fk.T if fk_info is None: fk_info = fk.__class__(ssm=fk.ssm, data=fk.data[::-1]) if seed: random.seed(seed) est = np.zeros(T - 1) if method=='FFBS_QMC': pf = particles.SQMC(fk=fk, N=N, store_history=True) else: pf = particles.SMC(fk=fk, N=N, store_history=True) tic = time.clock() pf.run() if method in ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC']: if method.startswith('FFBS_ON'): z = pf.hist.backward_sampling(N, linear_cost=(method == 'FFBS_ON')) else: z = pf.hist.backward_sampling_qmc(N) for t in range(T - 1): est[t] = np.mean(add_func(t, z[t], z[t + 1])) elif method in ['two-filter_ON2', 'two-filter_ON', 'two-filter_ON_prop']: infopf = particles.SMC(fk=fk_info, N=N, store_history=True) infopf.run() for t in range(T - 1): psi = lambda x, xf: add_func(t, x, xf) if method == 'two-filter_ON2': est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma) else: ti = T - 2 - t # t+1 for info filter if method == 'two-filter_ON_prop': modif_fwd = stats.norm.logpdf(pf.hist.X[t], loc=np.mean(infopf.hist.X[ti + 1]), scale=np.std(infopf.hist.X[ti + 1])) modif_info = stats.norm.logpdf(infopf.hist.X[ti], loc=np.mean(pf.hist.X[t + 1]), scale=np.std(pf.hist.X[t + 1])) else: modif_fwd, modif_info = None, None est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma, linear_cost=True, modif_forward=modif_fwd, modif_info=modif_info) else: print('no such method?') cpu_time = time.clock() - tic print(method + ' took %.2f s for N=%i' % (cpu_time, N)) return {'est': est, 'cpu': cpu_time}
[ "def", "smoothing_worker", "(", "method", "=", "None", ",", "N", "=", "100", ",", "seed", "=", "None", ",", "fk", "=", "None", ",", "fk_info", "=", "None", ",", "add_func", "=", "None", ",", "log_gamma", "=", "None", ")", ":", "T", "=", "fk", ".", "T", "if", "fk_info", "is", "None", ":", "fk_info", "=", "fk", ".", "__class__", "(", "ssm", "=", "fk", ".", "ssm", ",", "data", "=", "fk", ".", "data", "[", ":", ":", "-", "1", "]", ")", "if", "seed", ":", "random", ".", "seed", "(", "seed", ")", "est", "=", "np", ".", "zeros", "(", "T", "-", "1", ")", "if", "method", "==", "'FFBS_QMC'", ":", "pf", "=", "particles", ".", "SQMC", "(", "fk", "=", "fk", ",", "N", "=", "N", ",", "store_history", "=", "True", ")", "else", ":", "pf", "=", "particles", ".", "SMC", "(", "fk", "=", "fk", ",", "N", "=", "N", ",", "store_history", "=", "True", ")", "tic", "=", "time", ".", "clock", "(", ")", "pf", ".", "run", "(", ")", "if", "method", "in", "[", "'FFBS_ON'", ",", "'FFBS_ON2'", ",", "'FFBS_QMC'", "]", ":", "if", "method", ".", "startswith", "(", "'FFBS_ON'", ")", ":", "z", "=", "pf", ".", "hist", ".", "backward_sampling", "(", "N", ",", "linear_cost", "=", "(", "method", "==", "'FFBS_ON'", ")", ")", "else", ":", "z", "=", "pf", ".", "hist", ".", "backward_sampling_qmc", "(", "N", ")", "for", "t", "in", "range", "(", "T", "-", "1", ")", ":", "est", "[", "t", "]", "=", "np", ".", "mean", "(", "add_func", "(", "t", ",", "z", "[", "t", "]", ",", "z", "[", "t", "+", "1", "]", ")", ")", "elif", "method", "in", "[", "'two-filter_ON2'", ",", "'two-filter_ON'", ",", "'two-filter_ON_prop'", "]", ":", "infopf", "=", "particles", ".", "SMC", "(", "fk", "=", "fk_info", ",", "N", "=", "N", ",", "store_history", "=", "True", ")", "infopf", ".", "run", "(", ")", "for", "t", "in", "range", "(", "T", "-", "1", ")", ":", "psi", "=", "lambda", "x", ",", "xf", ":", "add_func", "(", "t", ",", "x", ",", "xf", ")", "if", "method", "==", "'two-filter_ON2'", ":", "est", "[", "t", "]", "=", "pf", ".", "hist", ".", "twofilter_smoothing", "(", "t", ",", "infopf", ",", "psi", ",", "log_gamma", ")", "else", ":", "ti", "=", "T", "-", "2", "-", "t", "# t+1 for info filter", "if", "method", "==", "'two-filter_ON_prop'", ":", "modif_fwd", "=", "stats", ".", "norm", ".", "logpdf", "(", "pf", ".", "hist", ".", "X", "[", "t", "]", ",", "loc", "=", "np", ".", "mean", "(", "infopf", ".", "hist", ".", "X", "[", "ti", "+", "1", "]", ")", ",", "scale", "=", "np", ".", "std", "(", "infopf", ".", "hist", ".", "X", "[", "ti", "+", "1", "]", ")", ")", "modif_info", "=", "stats", ".", "norm", ".", "logpdf", "(", "infopf", ".", "hist", ".", "X", "[", "ti", "]", ",", "loc", "=", "np", ".", "mean", "(", "pf", ".", "hist", ".", "X", "[", "t", "+", "1", "]", ")", ",", "scale", "=", "np", ".", "std", "(", "pf", ".", "hist", ".", "X", "[", "t", "+", "1", "]", ")", ")", "else", ":", "modif_fwd", ",", "modif_info", "=", "None", ",", "None", "est", "[", "t", "]", "=", "pf", ".", "hist", ".", "twofilter_smoothing", "(", "t", ",", "infopf", ",", "psi", ",", "log_gamma", ",", "linear_cost", "=", "True", ",", "modif_forward", "=", "modif_fwd", ",", "modif_info", "=", "modif_info", ")", "else", ":", "print", "(", "'no such method?'", ")", "cpu_time", "=", "time", ".", "clock", "(", ")", "-", "tic", "print", "(", "method", "+", "' took %.2f s for N=%i'", "%", "(", "cpu_time", ",", "N", ")", ")", "return", "{", "'est'", ":", "est", ",", "'cpu'", ":", "cpu_time", "}" ]
Generic worker for off-line smoothing algorithms.

    This worker may be used in conjunction with utils.multiplexer in order to
    run in parallel (and eventually compare) off-line smoothing algorithms.

    Parameters
    ----------
    method: string
         ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC', 'two-filter_ON',
         'two-filter_ON_prop', 'two-filter_ON2']
    N: int
        number of particles
    seed: int
        random generator seed; if None, generator is not seeded
    fk: Feynman-Kac object
        The Feynman-Kac model for the forward filter
    fk_info: Feynman-Kac object (default=None)
        the Feynman-Kac model for the information filter; if None,
        set to the same Feynman-Kac model as fk, with data in reverse
    add_func: function, with signature (t, x, xf)
        additive function, at time t, for particles x=x_t and xf=x_{t+1}
    log_gamma: function
        log of function gamma (see book)

    Returns
    -------
    a dict with fields:
    est: a ndarray of length T-1
    cpu: CPU time
[ "Generic", "worker", "for", "off", "-", "line", "smoothing", "algorithms", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L367-L444
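A minimal driver sketch for the worker above, assuming the StochVol state-space model and the Bootstrap Feynman-Kac class that ship with particles, and taking log_gamma to be the log-density of PX0 (a common, but not mandatory, choice); the model, sample sizes and run counts are all illustrative:

import numpy as np
from particles import state_space_models as ssms
from particles import utils

ssm = ssms.StochVol()                        # assumed model choice
x, data = ssm.simulate(50)                   # synthetic observations
fk = ssms.Bootstrap(ssm=ssm, data=data)
add_func = lambda t, x, xf: x                # targets E[X_t | Y_{0:T-1}]
log_gamma = lambda x: ssm.PX0().logpdf(x)    # assumed choice of gamma
results = utils.multiplexer(f=smoothing_worker, nruns=3, nprocs=1,
                            method=['FFBS_ON2', 'two-filter_ON2'],
                            N=100, fk=fk, add_func=add_func,
                            log_gamma=log_gamma)
# 2 methods x 3 runs = 6 result dicts, each recording the method, the run
# index, the seed, and the worker's {'est', 'cpu'} output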
237,961
nchopin/particles
particles/smoothing.py
ParticleHistory.save
def save(self, X=None, w=None, A=None): """Save one "page" of history at a given time. .. note:: This method is used internally by `SMC` to store the state of the particle system at each time t. In most cases, users should not have to call this method directly. """ self.X.append(X) self.wgt.append(w) self.A.append(A)
python
def save(self, X=None, w=None, A=None): """Save one "page" of history at a given time. .. note:: This method is used internally by `SMC` to store the state of the particle system at each time t. In most cases, users should not have to call this method directly. """ self.X.append(X) self.wgt.append(w) self.A.append(A)
[ "def", "save", "(", "self", ",", "X", "=", "None", ",", "w", "=", "None", ",", "A", "=", "None", ")", ":", "self", ".", "X", ".", "append", "(", "X", ")", "self", ".", "wgt", ".", "append", "(", "w", ")", "self", ".", "A", ".", "append", "(", "A", ")" ]
Save one "page" of history at a given time. .. note:: This method is used internally by `SMC` to store the state of the particle system at each time t. In most cases, users should not have to call this method directly.
[ "Save", "one", "page", "of", "history", "at", "a", "given", "time", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L92-L102
237,962
nchopin/particles
particles/smoothing.py
ParticleHistory.extract_one_trajectory
def extract_one_trajectory(self): """Extract a single trajectory from the particle history. The final state is chosen randomly, then the corresponding trajectory is constructed backwards, until time t=0. """ traj = [] for t in reversed(range(self.T)): if t == self.T - 1: n = rs.multinomial_once(self.wgt[-1].W) else: n = self.A[t + 1][n] traj.append(self.X[t][n]) return traj[::-1]
python
def extract_one_trajectory(self): """Extract a single trajectory from the particle history. The final state is chosen randomly, then the corresponding trajectory is constructed backwards, until time t=0. """ traj = [] for t in reversed(range(self.T)): if t == self.T - 1: n = rs.multinomial_once(self.wgt[-1].W) else: n = self.A[t + 1][n] traj.append(self.X[t][n]) return traj[::-1]
[ "def", "extract_one_trajectory", "(", "self", ")", ":", "traj", "=", "[", "]", "for", "t", "in", "reversed", "(", "range", "(", "self", ".", "T", ")", ")", ":", "if", "t", "==", "self", ".", "T", "-", "1", ":", "n", "=", "rs", ".", "multinomial_once", "(", "self", ".", "wgt", "[", "-", "1", "]", ".", "W", ")", "else", ":", "n", "=", "self", ".", "A", "[", "t", "+", "1", "]", "[", "n", "]", "traj", ".", "append", "(", "self", ".", "X", "[", "t", "]", "[", "n", "]", ")", "return", "traj", "[", ":", ":", "-", "1", "]" ]
Extract a single trajectory from the particle history. The final state is chosen randomly, then the corresponding trajectory is constructed backwards, until time t=0.
[ "Extract", "a", "single", "trajectory", "from", "the", "particle", "history", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L104-L117
237,963
nchopin/particles
particles/smoothing.py
ParticleHistory.compute_trajectories
def compute_trajectories(self): """Compute the N trajectories that constitute the current genealogy. Compute and add attribute ``B`` to ``self`` where ``B`` is an array such that ``B[t,n]`` is the index of ancestor at time t of particle X_T^n, where T is the current length of history. """ self.B = np.empty((self.T, self.N), 'int') self.B[-1, :] = self.A[-1] for t in reversed(range(self.T - 1)): self.B[t, :] = self.A[t + 1][self.B[t + 1]]
python
def compute_trajectories(self): """Compute the N trajectories that constitute the current genealogy. Compute and add attribute ``B`` to ``self`` where ``B`` is an array such that ``B[t,n]`` is the index of ancestor at time t of particle X_T^n, where T is the current length of history. """ self.B = np.empty((self.T, self.N), 'int') self.B[-1, :] = self.A[-1] for t in reversed(range(self.T - 1)): self.B[t, :] = self.A[t + 1][self.B[t + 1]]
[ "def", "compute_trajectories", "(", "self", ")", ":", "self", ".", "B", "=", "np", ".", "empty", "(", "(", "self", ".", "T", ",", "self", ".", "N", ")", ",", "'int'", ")", "self", ".", "B", "[", "-", "1", ",", ":", "]", "=", "self", ".", "A", "[", "-", "1", "]", "for", "t", "in", "reversed", "(", "range", "(", "self", ".", "T", "-", "1", ")", ")", ":", "self", ".", "B", "[", "t", ",", ":", "]", "=", "self", ".", "A", "[", "t", "+", "1", "]", "[", "self", ".", "B", "[", "t", "+", "1", "]", "]" ]
Compute the N trajectories that constitute the current genealogy. Compute and add attribute ``B`` to ``self`` where ``B`` is an array such that ``B[t,n]`` is the index of ancestor at time t of particle X_T^n, where T is the current length of history.
[ "Compute", "the", "N", "trajectories", "that", "constitute", "the", "current", "genealogy", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L119-L129
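To make the backward recursion concrete, here is a self-contained toy run of the same loop on invented ancestor variables:

import numpy as np

# A[t][n] = index, among the time t-1 particles, of the parent of particle n
A = [None, np.array([0, 0, 2]), np.array([1, 2, 2])]
T, N = 3, 3
B = np.empty((T, N), dtype=int)
B[-1, :] = A[-1]
for t in reversed(range(T - 1)):
    B[t, :] = A[t + 1][B[t + 1]]
print(B)   # row t gives the time-t ancestor index of each final particle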
237,964
nchopin/particles
particles/smoothing.py
ParticleHistory.twofilter_smoothing
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False, return_ess=False, modif_forward=None, modif_info=None): """Two-filter smoothing. Parameters ---------- t: time, in range 0 <= t < T-1 info: SMC object the information filter phi: function test function, a function of (X_t,X_{t+1}) loggamma: function a function of (X_{t+1}) linear_cost: bool if True, use the O(N) variant (basic version is O(N^2)) Returns ------- Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1}) """ ti = self.T - 2 - t # t+1 in reverse if t < 0 or t >= self.T - 1: raise ValueError( 'two-filter smoothing: t must be in range 0,...,T-2') lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti]) if linear_cost: return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo, return_ess, modif_forward, modif_info) else: return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo)
python
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False, return_ess=False, modif_forward=None, modif_info=None): """Two-filter smoothing. Parameters ---------- t: time, in range 0 <= t < T-1 info: SMC object the information filter phi: function test function, a function of (X_t,X_{t+1}) loggamma: function a function of (X_{t+1}) linear_cost: bool if True, use the O(N) variant (basic version is O(N^2)) Returns ------- Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1}) """ ti = self.T - 2 - t # t+1 in reverse if t < 0 or t >= self.T - 1: raise ValueError( 'two-filter smoothing: t must be in range 0,...,T-2') lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti]) if linear_cost: return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo, return_ess, modif_forward, modif_info) else: return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo)
[ "def", "twofilter_smoothing", "(", "self", ",", "t", ",", "info", ",", "phi", ",", "loggamma", ",", "linear_cost", "=", "False", ",", "return_ess", "=", "False", ",", "modif_forward", "=", "None", ",", "modif_info", "=", "None", ")", ":", "ti", "=", "self", ".", "T", "-", "2", "-", "t", "# t+1 in reverse", "if", "t", "<", "0", "or", "t", ">=", "self", ".", "T", "-", "1", ":", "raise", "ValueError", "(", "'two-filter smoothing: t must be in range 0,...,T-2'", ")", "lwinfo", "=", "info", ".", "hist", ".", "wgt", "[", "ti", "]", ".", "lw", "-", "loggamma", "(", "info", ".", "hist", ".", "X", "[", "ti", "]", ")", "if", "linear_cost", ":", "return", "self", ".", "_twofilter_smoothing_ON", "(", "t", ",", "ti", ",", "info", ",", "phi", ",", "lwinfo", ",", "return_ess", ",", "modif_forward", ",", "modif_info", ")", "else", ":", "return", "self", ".", "_twofilter_smoothing_ON2", "(", "t", ",", "ti", ",", "info", ",", "phi", ",", "lwinfo", ")" ]
Two-filter smoothing. Parameters ---------- t: time, in range 0 <= t < T-1 info: SMC object the information filter phi: function test function, a function of (X_t,X_{t+1}) loggamma: function a function of (X_{t+1}) linear_cost: bool if True, use the O(N) variant (basic version is O(N^2)) Returns ------- Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1})
[ "Two", "-", "filter", "smoothing", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L286-L317
237,965
nchopin/particles
particles/core.py
multiSMC
def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
    """Run SMC algorithms in parallel, for different combinations of parameters.

    `multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
    A basic usage is::

        results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)

    This runs the same SMC algorithm 20 times, using all available CPU cores.
    The output, ``results``, is a list of 20 dictionaries; a given dict
    corresponds to a single run, and contains the following (key, value) pairs:

        + ``'run'``: a run identifier (a number between 0 and nruns-1)

        + ``'output'``: the corresponding SMC object (once method run was
          completed)

    Since a `SMC` object may take a lot of space in memory (especially when
    the option ``store_history`` is set to True), it is possible to require
    `multiSMC` to store only some chosen summary of the SMC runs, using option
    `out_func`. For instance, if we only want to store the estimate of the
    log-likelihood of the model obtained from each particle filter::

        of = lambda pf: pf.logLt
        results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)

    It is also possible to vary the parameters. Say::

        results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])

    will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
    N=500, and 10 times for N=1000. The number 10 comes from the fact that we
    did not specify nruns, and its default value is 10. The 30 dictionaries
    obtained in results will then contain an extra (key, value) pair that will
    give the value of N for which the run was performed.

    It is possible to vary several arguments. Each time a list must be
    provided. The end result will amount to take a *cartesian product* of the
    arguments::

        results = multiSMC(fk=my_fk_model, N=[100, 1000],
                           resampling=['multinomial', 'residual'], nruns=20)

    In that case we run our algorithm 80 times: 20 times with N=100 and
    resampling set to multinomial, 20 times with N=100 and resampling set to
    residual and so on.

    Parameters
    ----------
    * nruns: int, optional
        number of runs (default is 10)
    * nprocs: int, optional
        number of processors to use; if negative, number of cores not to use.
        Default value is 0 (all available cores are used)
    * out_func: callable, optional
        function to transform the output of each SMC run. (If not given, output
        will be the complete SMC object).
    * args: dict
        arguments passed to SMC class

    Returns
    -------
    A list of dicts

    See also
    --------
    `utils.multiplexer`: for more details on the syntax.
    """
    def f(**args):
        pf = SMC(**args)
        pf.run()
        return out_func(pf)

    if out_func is None:
        out_func = lambda x: x
    return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
                             **args)
python
def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
    """Run SMC algorithms in parallel, for different combinations of parameters.

    `multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
    A basic usage is::

        results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)

    This runs the same SMC algorithm 20 times, using all available CPU cores.
    The output, ``results``, is a list of 20 dictionaries; a given dict
    corresponds to a single run, and contains the following (key, value) pairs:

        + ``'run'``: a run identifier (a number between 0 and nruns-1)

        + ``'output'``: the corresponding SMC object (once method run was
          completed)

    Since a `SMC` object may take a lot of space in memory (especially when
    the option ``store_history`` is set to True), it is possible to require
    `multiSMC` to store only some chosen summary of the SMC runs, using option
    `out_func`. For instance, if we only want to store the estimate of the
    log-likelihood of the model obtained from each particle filter::

        of = lambda pf: pf.logLt
        results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)

    It is also possible to vary the parameters. Say::

        results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])

    will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
    N=500, and 10 times for N=1000. The number 10 comes from the fact that we
    did not specify nruns, and its default value is 10. The 30 dictionaries
    obtained in results will then contain an extra (key, value) pair that will
    give the value of N for which the run was performed.

    It is possible to vary several arguments. Each time a list must be
    provided. The end result will amount to take a *cartesian product* of the
    arguments::

        results = multiSMC(fk=my_fk_model, N=[100, 1000],
                           resampling=['multinomial', 'residual'], nruns=20)

    In that case we run our algorithm 80 times: 20 times with N=100 and
    resampling set to multinomial, 20 times with N=100 and resampling set to
    residual and so on.

    Parameters
    ----------
    * nruns: int, optional
        number of runs (default is 10)
    * nprocs: int, optional
        number of processors to use; if negative, number of cores not to use.
        Default value is 0 (all available cores are used)
    * out_func: callable, optional
        function to transform the output of each SMC run. (If not given, output
        will be the complete SMC object).
    * args: dict
        arguments passed to SMC class

    Returns
    -------
    A list of dicts

    See also
    --------
    `utils.multiplexer`: for more details on the syntax.
    """
    def f(**args):
        pf = SMC(**args)
        pf.run()
        return out_func(pf)

    if out_func is None:
        out_func = lambda x: x
    return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
                             **args)
[ "def", "multiSMC", "(", "nruns", "=", "10", ",", "nprocs", "=", "0", ",", "out_func", "=", "None", ",", "*", "*", "args", ")", ":", "def", "f", "(", "*", "*", "args", ")", ":", "pf", "=", "SMC", "(", "*", "*", "args", ")", "pf", ".", "run", "(", ")", "return", "out_func", "(", "pf", ")", "if", "out_func", "is", "None", ":", "out_func", "=", "lambda", "x", ":", "x", "return", "utils", ".", "multiplexer", "(", "f", "=", "f", ",", "nruns", "=", "nruns", ",", "nprocs", "=", "nprocs", ",", "seeding", "=", "True", ",", "*", "*", "args", ")" ]
Run SMC algorithms in parallel, for different combinations of parameters.

    `multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
    A basic usage is::

        results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)

    This runs the same SMC algorithm 20 times, using all available CPU cores.
    The output, ``results``, is a list of 20 dictionaries; a given dict
    corresponds to a single run, and contains the following (key, value) pairs:

        + ``'run'``: a run identifier (a number between 0 and nruns-1)

        + ``'output'``: the corresponding SMC object (once method run was
          completed)

    Since a `SMC` object may take a lot of space in memory (especially when
    the option ``store_history`` is set to True), it is possible to require
    `multiSMC` to store only some chosen summary of the SMC runs, using option
    `out_func`. For instance, if we only want to store the estimate of the
    log-likelihood of the model obtained from each particle filter::

        of = lambda pf: pf.logLt
        results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)

    It is also possible to vary the parameters. Say::

        results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])

    will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
    N=500, and 10 times for N=1000. The number 10 comes from the fact that we
    did not specify nruns, and its default value is 10. The 30 dictionaries
    obtained in results will then contain an extra (key, value) pair that will
    give the value of N for which the run was performed.

    It is possible to vary several arguments. Each time a list must be
    provided. The end result will amount to take a *cartesian product* of the
    arguments::

        results = multiSMC(fk=my_fk_model, N=[100, 1000],
                           resampling=['multinomial', 'residual'], nruns=20)

    In that case we run our algorithm 80 times: 20 times with N=100 and
    resampling set to multinomial, 20 times with N=100 and resampling set to
    residual and so on.

    Parameters
    ----------
    * nruns: int, optional
        number of runs (default is 10)
    * nprocs: int, optional
        number of processors to use; if negative, number of cores not to use.
        Default value is 0 (all available cores are used)
    * out_func: callable, optional
        function to transform the output of each SMC run. (If not given, output
        will be the complete SMC object).
    * args: dict
        arguments passed to SMC class

    Returns
    -------
    A list of dicts

    See also
    --------
    `utils.multiplexer`: for more details on the syntax.
[ "Run", "SMC", "algorithms", "in", "parallel", "for", "different", "combinations", "of", "parameters", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/core.py#L438-L512
237,966
nchopin/particles
particles/core.py
SMC.reset_weights
def reset_weights(self): """Reset weights after a resampling step. """ if self.fk.isAPF: lw = (rs.log_mean_exp(self.logetat, W=self.W) - self.logetat[self.A]) self.wgts = rs.Weights(lw=lw) else: self.wgts = rs.Weights()
python
def reset_weights(self): """Reset weights after a resampling step. """ if self.fk.isAPF: lw = (rs.log_mean_exp(self.logetat, W=self.W) - self.logetat[self.A]) self.wgts = rs.Weights(lw=lw) else: self.wgts = rs.Weights()
[ "def", "reset_weights", "(", "self", ")", ":", "if", "self", ".", "fk", ".", "isAPF", ":", "lw", "=", "(", "rs", ".", "log_mean_exp", "(", "self", ".", "logetat", ",", "W", "=", "self", ".", "W", ")", "-", "self", ".", "logetat", "[", "self", ".", "A", "]", ")", "self", ".", "wgts", "=", "rs", ".", "Weights", "(", "lw", "=", "lw", ")", "else", ":", "self", ".", "wgts", "=", "rs", ".", "Weights", "(", ")" ]
Reset weights after a resampling step.
[ "Reset", "weights", "after", "a", "resampling", "step", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/core.py#L317-L325
237,967
nchopin/particles
particles/resampling.py
log_sum_exp
def log_sum_exp(v): """Log of the sum of the exp of the arguments. Parameters ---------- v: ndarray Returns ------- l: float l = log(sum(exp(v))) Note ---- use the log_sum_exp trick to avoid overflow: i.e. we remove the max of v before exponentiating, then we add it back See also -------- log_mean_exp """ m = v.max() return m + np.log(np.sum(np.exp(v - m)))
python
def log_sum_exp(v): """Log of the sum of the exp of the arguments. Parameters ---------- v: ndarray Returns ------- l: float l = log(sum(exp(v))) Note ---- use the log_sum_exp trick to avoid overflow: i.e. we remove the max of v before exponentiating, then we add it back See also -------- log_mean_exp """ m = v.max() return m + np.log(np.sum(np.exp(v - m)))
[ "def", "log_sum_exp", "(", "v", ")", ":", "m", "=", "v", ".", "max", "(", ")", "return", "m", "+", "np", ".", "log", "(", "np", ".", "sum", "(", "np", ".", "exp", "(", "v", "-", "m", ")", ")", ")" ]
Log of the sum of the exp of the arguments. Parameters ---------- v: ndarray Returns ------- l: float l = log(sum(exp(v))) Note ---- use the log_sum_exp trick to avoid overflow: i.e. we remove the max of v before exponentiating, then we add it back See also -------- log_mean_exp
[ "Log", "of", "the", "sum", "of", "the", "exp", "of", "the", "arguments", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L233-L256
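A quick numerical check (invented values) of why the maximum is subtracted before exponentiating:

import numpy as np

v = np.array([1000., 1001., 999.])
naive = np.log(np.sum(np.exp(v)))             # exp overflows, gives inf
m = v.max()
stable = m + np.log(np.sum(np.exp(v - m)))    # ~1001.41, no overflow
print(naive, stable)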
237,968
nchopin/particles
particles/resampling.py
log_sum_exp_ab
def log_sum_exp_ab(a, b): """log_sum_exp for two scalars. Parameters ---------- a, b: float Returns ------- c: float c = log(e^a + e^b) """ if a > b: return a + np.log(1. + np.exp(b - a)) else: return b + np.log(1. + np.exp(a - b))
python
def log_sum_exp_ab(a, b): """log_sum_exp for two scalars. Parameters ---------- a, b: float Returns ------- c: float c = log(e^a + e^b) """ if a > b: return a + np.log(1. + np.exp(b - a)) else: return b + np.log(1. + np.exp(a - b))
[ "def", "log_sum_exp_ab", "(", "a", ",", "b", ")", ":", "if", "a", ">", "b", ":", "return", "a", "+", "np", ".", "log", "(", "1.", "+", "np", ".", "exp", "(", "b", "-", "a", ")", ")", "else", ":", "return", "b", "+", "np", ".", "log", "(", "1.", "+", "np", ".", "exp", "(", "a", "-", "b", ")", ")" ]
log_sum_exp for two scalars. Parameters ---------- a, b: float Returns ------- c: float c = log(e^a + e^b)
[ "log_sum_exp", "for", "two", "scalars", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L259-L274
237,969
nchopin/particles
particles/resampling.py
wmean_and_var
def wmean_and_var(W, x): """Component-wise weighted mean and variance. Parameters ---------- W: (N,) ndarray normalised weights (must be >=0 and sum to one). x: ndarray (such that shape[0]==N) data Returns ------- dictionary {'mean':weighted_means, 'var':weighted_variances} """ m = np.average(x, weights=W, axis=0) m2 = np.average(x**2, weights=W, axis=0) v = m2 - m**2 return {'mean': m, 'var': v}
python
def wmean_and_var(W, x): """Component-wise weighted mean and variance. Parameters ---------- W: (N,) ndarray normalised weights (must be >=0 and sum to one). x: ndarray (such that shape[0]==N) data Returns ------- dictionary {'mean':weighted_means, 'var':weighted_variances} """ m = np.average(x, weights=W, axis=0) m2 = np.average(x**2, weights=W, axis=0) v = m2 - m**2 return {'mean': m, 'var': v}
[ "def", "wmean_and_var", "(", "W", ",", "x", ")", ":", "m", "=", "np", ".", "average", "(", "x", ",", "weights", "=", "W", ",", "axis", "=", "0", ")", "m2", "=", "np", ".", "average", "(", "x", "**", "2", ",", "weights", "=", "W", ",", "axis", "=", "0", ")", "v", "=", "m2", "-", "m", "**", "2", "return", "{", "'mean'", ":", "m", ",", "'var'", ":", "v", "}" ]
Component-wise weighted mean and variance. Parameters ---------- W: (N,) ndarray normalised weights (must be >=0 and sum to one). x: ndarray (such that shape[0]==N) data Returns ------- dictionary {'mean':weighted_means, 'var':weighted_variances}
[ "Component", "-", "wise", "weighted", "mean", "and", "variance", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L306-L324
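Sanity check on made-up data: with uniform weights the function reduces to the ordinary mean and the (biased) variance:

import numpy as np

W = np.full(5, 0.2)
x = np.arange(5.)
print(wmean_and_var(W, x))   # {'mean': 2.0, 'var': 2.0}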
237,970
nchopin/particles
particles/resampling.py
wmean_and_var_str_array
def wmean_and_var_str_array(W, x): """Weighted mean and variance of each component of a structured array. Parameters ---------- W: (N,) ndarray normalised weights (must be >=0 and sum to one). x: (N,) structured array data Returns ------- dictionary {'mean':weighted_means, 'var':weighted_variances} """ m = np.empty(shape=x.shape[1:], dtype=x.dtype) v = np.empty_like(m) for p in x.dtype.names: m[p], v[p] = wmean_and_var(W, x[p]).values() return {'mean': m, 'var': v}
python
def wmean_and_var_str_array(W, x): """Weighted mean and variance of each component of a structured array. Parameters ---------- W: (N,) ndarray normalised weights (must be >=0 and sum to one). x: (N,) structured array data Returns ------- dictionary {'mean':weighted_means, 'var':weighted_variances} """ m = np.empty(shape=x.shape[1:], dtype=x.dtype) v = np.empty_like(m) for p in x.dtype.names: m[p], v[p] = wmean_and_var(W, x[p]).values() return {'mean': m, 'var': v}
[ "def", "wmean_and_var_str_array", "(", "W", ",", "x", ")", ":", "m", "=", "np", ".", "empty", "(", "shape", "=", "x", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "x", ".", "dtype", ")", "v", "=", "np", ".", "empty_like", "(", "m", ")", "for", "p", "in", "x", ".", "dtype", ".", "names", ":", "m", "[", "p", "]", ",", "v", "[", "p", "]", "=", "wmean_and_var", "(", "W", ",", "x", "[", "p", "]", ")", ".", "values", "(", ")", "return", "{", "'mean'", ":", "m", ",", "'var'", ":", "v", "}" ]
Weighted mean and variance of each component of a structured array. Parameters ---------- W: (N,) ndarray normalised weights (must be >=0 and sum to one). x: (N,) structured array data Returns ------- dictionary {'mean':weighted_means, 'var':weighted_variances}
[ "Weighted", "mean", "and", "variance", "of", "each", "component", "of", "a", "structured", "array", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L326-L345
237,971
nchopin/particles
particles/resampling.py
wquantiles
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)): """Quantiles for weighted data. Parameters ---------- W: (N,) ndarray normalised weights (weights are >=0 and sum to one) x: (N,) or (N,d) ndarray data alphas: list-like of size k (default: (0.25, 0.50, 0.75)) probabilities (between 0. and 1.) Returns ------- a (k,) or (d, k) ndarray containing the alpha-quantiles """ if len(x.shape) == 1: return _wquantiles(W, x, alphas=alphas) elif len(x.shape) == 2: return np.array([_wquantiles(W, x[:, i], alphas=alphas) for i in range(x.shape[1])])
python
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)): """Quantiles for weighted data. Parameters ---------- W: (N,) ndarray normalised weights (weights are >=0 and sum to one) x: (N,) or (N,d) ndarray data alphas: list-like of size k (default: (0.25, 0.50, 0.75)) probabilities (between 0. and 1.) Returns ------- a (k,) or (d, k) ndarray containing the alpha-quantiles """ if len(x.shape) == 1: return _wquantiles(W, x, alphas=alphas) elif len(x.shape) == 2: return np.array([_wquantiles(W, x[:, i], alphas=alphas) for i in range(x.shape[1])])
[ "def", "wquantiles", "(", "W", ",", "x", ",", "alphas", "=", "(", "0.25", ",", "0.50", ",", "0.75", ")", ")", ":", "if", "len", "(", "x", ".", "shape", ")", "==", "1", ":", "return", "_wquantiles", "(", "W", ",", "x", ",", "alphas", "=", "alphas", ")", "elif", "len", "(", "x", ".", "shape", ")", "==", "2", ":", "return", "np", ".", "array", "(", "[", "_wquantiles", "(", "W", ",", "x", "[", ":", ",", "i", "]", ",", "alphas", "=", "alphas", ")", "for", "i", "in", "range", "(", "x", ".", "shape", "[", "1", "]", ")", "]", ")" ]
Quantiles for weighted data. Parameters ---------- W: (N,) ndarray normalised weights (weights are >=0 and sum to one) x: (N,) or (N,d) ndarray data alphas: list-like of size k (default: (0.25, 0.50, 0.75)) probabilities (between 0. and 1.) Returns ------- a (k,) or (d, k) ndarray containing the alpha-quantiles
[ "Quantiles", "for", "weighted", "data", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L359-L379
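Usage sketch (numbers invented; the heavy lifting happens in the private helper _wquantiles, which is defined elsewhere in this module):

import numpy as np

W = np.array([0.1, 0.2, 0.3, 0.4])               # normalised weights
x = np.array([1., 2., 3., 4.])
q1 = wquantiles(W, x)                            # shape (3,): weighted quartiles
q2 = wquantiles(W, np.stack([x, -x], axis=1))    # shape (2, 3): one row per column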
237,972
nchopin/particles
particles/resampling.py
wquantiles_str_array
def wquantiles_str_array(W, x, alphas=(0.25, 0.50, 0.75)):
    """quantiles for weighted data stored in a structured array.

    Parameters
    ----------
    W: (N,) ndarray
        normalised weights (weights are >=0 and sum to one)
    x: (N,) structured array
        data
    alphas: list-like of size k (default: (0.25, 0.50, 0.75))
        probabilities (between 0. and 1.)

    Returns
    -------
    dictionary {p: quantiles} that stores for each field name p
    the corresponding quantiles
    """
    return {p: wquantiles(W, x[p], alphas) for p in x.dtype.names}
python
def wquantiles_str_array(W, x, alphas=(0.25, 0.50, 0.75)):
    """quantiles for weighted data stored in a structured array.

    Parameters
    ----------
    W: (N,) ndarray
        normalised weights (weights are >=0 and sum to one)
    x: (N,) structured array
        data
    alphas: list-like of size k (default: (0.25, 0.50, 0.75))
        probabilities (between 0. and 1.)

    Returns
    -------
    dictionary {p: quantiles} that stores for each field name p
    the corresponding quantiles
    """
    return {p: wquantiles(W, x[p], alphas) for p in x.dtype.names}
[ "def", "wquantiles_str_array", "(", "W", ",", "x", ",", "alphas", "=", "(", "0.25", ",", "0.50", ",", "0", ",", "75", ")", ")", ":", "return", "{", "p", ":", "wquantiles", "(", "W", ",", "x", "[", "p", "]", ",", "alphas", ")", "for", "p", "in", "x", ".", "dtype", ".", "names", "}" ]
quantiles for weighted data stored in a structured array. Parameters ---------- W: (N,) ndarray normalised weights (weights are >=0 and sum to one) x: (N,) structured array data alphas: list-like of size k (default: (0.25, 0.50, 0.75)) probabilities (between 0. and 1.) Returns ------- dictionary {p: quantiles} that stores for each field name p the corresponding quantiles
[ "quantiles", "for", "weighted", "data", "stored", "in", "a", "structured", "array", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L381-L399
237,973
nchopin/particles
particles/resampling.py
resampling_scheme
def resampling_scheme(func): """Decorator for resampling schemes.""" @functools.wraps(func) def modif_func(W, M=None): M = W.shape[0] if M is None else M return func(W, M) rs_funcs[func.__name__] = modif_func modif_func.__doc__ = rs_doc % func.__name__.capitalize() return modif_func
python
def resampling_scheme(func): """Decorator for resampling schemes.""" @functools.wraps(func) def modif_func(W, M=None): M = W.shape[0] if M is None else M return func(W, M) rs_funcs[func.__name__] = modif_func modif_func.__doc__ = rs_doc % func.__name__.capitalize() return modif_func
[ "def", "resampling_scheme", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "modif_func", "(", "W", ",", "M", "=", "None", ")", ":", "M", "=", "W", ".", "shape", "[", "0", "]", "if", "M", "is", "None", "else", "M", "return", "func", "(", "W", ",", "M", ")", "rs_funcs", "[", "func", ".", "__name__", "]", "=", "modif_func", "modif_func", ".", "__doc__", "=", "rs_doc", "%", "func", ".", "__name__", ".", "capitalize", "(", ")", "return", "modif_func" ]
Decorator for resampling schemes.
[ "Decorator", "for", "resampling", "schemes", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L423-L433
237,974
nchopin/particles
particles/resampling.py
inverse_cdf
def inverse_cdf(su, W): """Inverse CDF algorithm for a finite distribution. Parameters ---------- su: (M,) ndarray M sorted uniform variates (i.e. M ordered points in [0,1]). W: (N,) ndarray a vector of N normalized weights (>=0 and sum to one) Returns ------- A: (M,) ndarray a vector of M indices in range 0, ..., N-1 """ j = 0 s = W[0] M = su.shape[0] A = np.empty(M, 'int') for n in range(M): while su[n] > s: j += 1 s += W[j] A[n] = j return A
python
def inverse_cdf(su, W): """Inverse CDF algorithm for a finite distribution. Parameters ---------- su: (M,) ndarray M sorted uniform variates (i.e. M ordered points in [0,1]). W: (N,) ndarray a vector of N normalized weights (>=0 and sum to one) Returns ------- A: (M,) ndarray a vector of M indices in range 0, ..., N-1 """ j = 0 s = W[0] M = su.shape[0] A = np.empty(M, 'int') for n in range(M): while su[n] > s: j += 1 s += W[j] A[n] = j return A
[ "def", "inverse_cdf", "(", "su", ",", "W", ")", ":", "j", "=", "0", "s", "=", "W", "[", "0", "]", "M", "=", "su", ".", "shape", "[", "0", "]", "A", "=", "np", ".", "empty", "(", "M", ",", "'int'", ")", "for", "n", "in", "range", "(", "M", ")", ":", "while", "su", "[", "n", "]", ">", "s", ":", "j", "+=", "1", "s", "+=", "W", "[", "j", "]", "A", "[", "n", "]", "=", "j", "return", "A" ]
Inverse CDF algorithm for a finite distribution. Parameters ---------- su: (M,) ndarray M sorted uniform variates (i.e. M ordered points in [0,1]). W: (N,) ndarray a vector of N normalized weights (>=0 and sum to one) Returns ------- A: (M,) ndarray a vector of M indices in range 0, ..., N-1
[ "Inverse", "CDF", "algorithm", "for", "a", "finite", "distribution", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L443-L467
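As a usage sketch, the sorted uniform grid of systematic resampling can be fed directly into inverse_cdf; the grid construction below is the textbook one, not taken from this function:

import numpy as np

W = np.array([0.1, 0.2, 0.3, 0.4])
N = W.shape[0]
su = (np.random.rand() + np.arange(N)) / N   # sorted in [0,1] by construction
A = inverse_cdf(su, W)                       # ancestor indices in 0, ..., N-1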
237,975
nchopin/particles
particles/hilbert.py
hilbert_array
def hilbert_array(xint): """Compute Hilbert indices. Parameters ---------- xint: (N, d) int numpy.ndarray Returns ------- h: (N,) int numpy.ndarray Hilbert indices """ N, d = xint.shape h = np.zeros(N, int64) for n in range(N): h[n] = Hilbert_to_int(xint[n, :]) return h
python
def hilbert_array(xint): """Compute Hilbert indices. Parameters ---------- xint: (N, d) int numpy.ndarray Returns ------- h: (N,) int numpy.ndarray Hilbert indices """ N, d = xint.shape h = np.zeros(N, int64) for n in range(N): h[n] = Hilbert_to_int(xint[n, :]) return h
[ "def", "hilbert_array", "(", "xint", ")", ":", "N", ",", "d", "=", "xint", ".", "shape", "h", "=", "np", ".", "zeros", "(", "N", ",", "int64", ")", "for", "n", "in", "range", "(", "N", ")", ":", "h", "[", "n", "]", "=", "Hilbert_to_int", "(", "xint", "[", "n", ",", ":", "]", ")", "return", "h" ]
Compute Hilbert indices. Parameters ---------- xint: (N, d) int numpy.ndarray Returns ------- h: (N,) int numpy.ndarray Hilbert indices
[ "Compute", "Hilbert", "indices", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/hilbert.py#L17-L33
237,976
nchopin/particles
particles/mcmc.py
MCMC.mean_sq_jump_dist
def mean_sq_jump_dist(self, discard_frac=0.1): """Mean squared jumping distance estimated from chain. Parameters ---------- discard_frac: float fraction of iterations to discard at the beginning (as a burn-in) Returns ------- float """ discard = int(self.niter * discard_frac) return msjd(self.chain.theta[discard:])
python
def mean_sq_jump_dist(self, discard_frac=0.1): """Mean squared jumping distance estimated from chain. Parameters ---------- discard_frac: float fraction of iterations to discard at the beginning (as a burn-in) Returns ------- float """ discard = int(self.niter * discard_frac) return msjd(self.chain.theta[discard:])
[ "def", "mean_sq_jump_dist", "(", "self", ",", "discard_frac", "=", "0.1", ")", ":", "discard", "=", "int", "(", "self", ".", "niter", "*", "discard_frac", ")", "return", "msjd", "(", "self", ".", "chain", ".", "theta", "[", "discard", ":", "]", ")" ]
Mean squared jumping distance estimated from chain. Parameters ---------- discard_frac: float fraction of iterations to discard at the beginning (as a burn-in) Returns ------- float
[ "Mean", "squared", "jumping", "distance", "estimated", "from", "chain", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/mcmc.py#L99-L112
237,977
nchopin/particles
particles/mcmc.py
VanishCovTracker.update
def update(self, v): """Adds point v""" self.t += 1 g = self.gamma() self.mu = (1. - g) * self.mu + g * v mv = v - self.mu self.Sigma = ((1. - g) * self.Sigma + g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :])) try: self.L = cholesky(self.Sigma, lower=True) except LinAlgError: self.L = self.L0
python
def update(self, v): """Adds point v""" self.t += 1 g = self.gamma() self.mu = (1. - g) * self.mu + g * v mv = v - self.mu self.Sigma = ((1. - g) * self.Sigma + g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :])) try: self.L = cholesky(self.Sigma, lower=True) except LinAlgError: self.L = self.L0
[ "def", "update", "(", "self", ",", "v", ")", ":", "self", ".", "t", "+=", "1", "g", "=", "self", ".", "gamma", "(", ")", "self", ".", "mu", "=", "(", "1.", "-", "g", ")", "*", "self", ".", "mu", "+", "g", "*", "v", "mv", "=", "v", "-", "self", ".", "mu", "self", ".", "Sigma", "=", "(", "(", "1.", "-", "g", ")", "*", "self", ".", "Sigma", "+", "g", "*", "np", ".", "dot", "(", "mv", "[", ":", ",", "np", ".", "newaxis", "]", ",", "mv", "[", "np", ".", "newaxis", ",", ":", "]", ")", ")", "try", ":", "self", ".", "L", "=", "cholesky", "(", "self", ".", "Sigma", ",", "lower", "=", "True", ")", "except", "LinAlgError", ":", "self", ".", "L", "=", "self", ".", "L0" ]
Adds point v
[ "Adds", "point", "v" ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/mcmc.py#L161-L172
237,978
nchopin/particles
particles/utils.py
cartesian_lists
def cartesian_lists(d):
    """
    turns a dict of lists into a list of dicts that represents
    the cartesian product of the initial lists

    Example
    -------
    cartesian_lists({'a':[0, 2], 'b':[3, 4, 5]})
    returns
    [ {'a':0, 'b':3}, {'a':0, 'b':4}, ... {'a':2, 'b':5} ]
    """
    return [{k: v for k, v in zip(d.keys(), args)}
            for args in itertools.product(*d.values())]
python
def cartesian_lists(d):
    """
    turns a dict of lists into a list of dicts that represents
    the cartesian product of the initial lists

    Example
    -------
    cartesian_lists({'a':[0, 2], 'b':[3, 4, 5]})
    returns
    [ {'a':0, 'b':3}, {'a':0, 'b':4}, ... {'a':2, 'b':5} ]
    """
    return [{k: v for k, v in zip(d.keys(), args)}
            for args in itertools.product(*d.values())]
[ "def", "cartesian_lists", "(", "d", ")", ":", "return", "[", "{", "k", ":", "v", "for", "k", ",", "v", "in", "zip", "(", "d", ".", "keys", "(", ")", ",", "args", ")", "}", "for", "args", "in", "itertools", ".", "product", "(", "*", "d", ".", "values", "(", ")", ")", "]" ]
turns a dict of lists into a list of dicts that represents
    the cartesian product of the initial lists

    Example
    -------
    cartesian_lists({'a':[0, 2], 'b':[3, 4, 5]})
    returns
    [ {'a':0, 'b':3}, {'a':0, 'b':4}, ... {'a':2, 'b':5} ]
[ "turns", "a", "dict", "of", "lists", "into", "a", "list", "of", "dicts", "that", "represents", "the", "cartesian", "product", "of", "the", "initial", "lists" ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L87-L100
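The docstring example written out as runnable code (output order assumes the insertion-ordered dicts of Python >= 3.7):

d = {'a': [0, 2], 'b': [3, 4, 5]}
print(cartesian_lists(d))
# [{'a': 0, 'b': 3}, {'a': 0, 'b': 4}, {'a': 0, 'b': 5},
#  {'a': 2, 'b': 3}, {'a': 2, 'b': 4}, {'a': 2, 'b': 5}]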
237,979
nchopin/particles
particles/utils.py
cartesian_args
def cartesian_args(args, listargs, dictargs): """ Compute a list of inputs and outputs for a function with kw arguments. args: dict fixed arguments, e.g. {'x': 3}, then x=3 for all inputs listargs: dict arguments specified as a list; then the inputs should be the Cartesian products of these lists dictargs: dict same as above, except the key will be used in the output (see module doc for more explanation) """ ils = {k: [v, ] for k, v in args.items()} ils.update(listargs) ils.update({k: v.values() for k, v in dictargs.items()}) ols = listargs.copy() ols.update({k: v.keys() for k, v in dictargs.items()}) return cartesian_lists(ils), cartesian_lists(ols)
python
def cartesian_args(args, listargs, dictargs): """ Compute a list of inputs and outputs for a function with kw arguments. args: dict fixed arguments, e.g. {'x': 3}, then x=3 for all inputs listargs: dict arguments specified as a list; then the inputs should be the Cartesian products of these lists dictargs: dict same as above, except the key will be used in the output (see module doc for more explanation) """ ils = {k: [v, ] for k, v in args.items()} ils.update(listargs) ils.update({k: v.values() for k, v in dictargs.items()}) ols = listargs.copy() ols.update({k: v.keys() for k, v in dictargs.items()}) return cartesian_lists(ils), cartesian_lists(ols)
[ "def", "cartesian_args", "(", "args", ",", "listargs", ",", "dictargs", ")", ":", "ils", "=", "{", "k", ":", "[", "v", ",", "]", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", "}", "ils", ".", "update", "(", "listargs", ")", "ils", ".", "update", "(", "{", "k", ":", "v", ".", "values", "(", ")", "for", "k", ",", "v", "in", "dictargs", ".", "items", "(", ")", "}", ")", "ols", "=", "listargs", ".", "copy", "(", ")", "ols", ".", "update", "(", "{", "k", ":", "v", ".", "keys", "(", ")", "for", "k", ",", "v", "in", "dictargs", ".", "items", "(", ")", "}", ")", "return", "cartesian_lists", "(", "ils", ")", ",", "cartesian_lists", "(", "ols", ")" ]
Compute a list of inputs and outputs for a function with kw arguments. args: dict fixed arguments, e.g. {'x': 3}, then x=3 for all inputs listargs: dict arguments specified as a list; then the inputs should be the Cartesian products of these lists dictargs: dict same as above, except the key will be used in the output (see module doc for more explanation)
[ "Compute", "a", "list", "of", "inputs", "and", "outputs", "for", "a", "function", "with", "kw", "arguments", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L103-L122
237,980
nchopin/particles
particles/utils.py
worker
def worker(qin, qout, f):
    """Worker for multiprocessing.

    A worker repeatedly picks a dict of arguments in the queue and computes
    f for this set of arguments, until the input queue is empty.
    """
    while not qin.empty():
        i, args = qin.get()
        qout.put((i, f(**args)))
python
def worker(qin, qout, f):
    """Worker for multiprocessing.

    A worker repeatedly picks a dict of arguments in the queue and computes
    f for this set of arguments, until the input queue is empty.
    """
    while not qin.empty():
        i, args = qin.get()
        qout.put((i, f(**args)))
[ "def", "worker", "(", "qin", ",", "qout", ",", "f", ")", ":", "while", "not", "qin", ".", "empty", "(", ")", ":", "i", ",", "args", "=", "qin", ".", "get", "(", ")", "qout", ".", "put", "(", "(", "i", ",", "f", "(", "*", "*", "args", ")", ")", ")" ]
Worker for multiprocessing.

    A worker repeatedly picks a dict of arguments in the queue and computes
    f for this set of arguments, until the input queue is empty.
[ "Worker", "for", "muliprocessing", ".", "A", "worker", "repeatedly", "picks", "a", "dict", "of", "arguments", "in", "the", "queue", "and", "computes", "f", "for", "this", "set", "of", "arguments", "until", "the", "input", "queue", "is", "empty", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L133-L141
237,981
nchopin/particles
particles/utils.py
distinct_seeds
def distinct_seeds(k): """ returns k distinct seeds for random number generation """ seeds = [] for _ in range(k): while True: s = random.randint(2**32 - 1) if s not in seeds: break seeds.append(s) return seeds
python
def distinct_seeds(k): """ returns k distinct seeds for random number generation """ seeds = [] for _ in range(k): while True: s = random.randint(2**32 - 1) if s not in seeds: break seeds.append(s) return seeds
[ "def", "distinct_seeds", "(", "k", ")", ":", "seeds", "=", "[", "]", "for", "_", "in", "range", "(", "k", ")", ":", "while", "True", ":", "s", "=", "random", ".", "randint", "(", "2", "**", "32", "-", "1", ")", "if", "s", "not", "in", "seeds", ":", "break", "seeds", ".", "append", "(", "s", ")", "return", "seeds" ]
returns k distinct seeds for random number generation
[ "returns", "k", "distinct", "seeds", "for", "random", "number", "generation" ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L179-L189
237,982
nchopin/particles
particles/utils.py
multiplexer
def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
    """Evaluate a function for different parameters, optionally in parallel.

    Parameters
    ----------
    f: function
        function f to evaluate, must take only kw arguments as inputs
    nruns: int
        number of evaluations of f for each set of arguments
    nprocs: int
        + if <=0, set to actual number of physical processors plus nprocs
          (i.e. -1 => number of cpus on your machine minus one)
        Default is 1, which means no multiprocessing
    seeding: bool (default: True if nruns > 1, False otherwise)
        whether we need to provide different seeds for RNGs
    **args:
        keyword arguments for function f.

    Note
    ----
    see documentation of `utils`
    """
    if not callable(f):
        raise ValueError('multiplexer: function f missing, or not callable')
    if seeding is None:
        seeding = (nruns > 1)
    # extra arguments (meant to be arguments for f)
    fixedargs, listargs, dictargs = {}, {}, {}
    listargs['run'] = list(range(nruns))
    for k, v in args.items():
        if isinstance(v, list):
            listargs[k] = v
        elif isinstance(v, dict):
            dictargs[k] = v
        else:
            fixedargs[k] = v
    # cartesian product
    inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
    for ip in inputs:
        ip.pop('run')  # run is not an argument of f, just an id for output
    # distributing different seeds
    if seeding:
        seeds = distinct_seeds(len(inputs))
        for ip, op, s in zip(inputs, outputs, seeds):
            ip['seed'] = s
            op['seed'] = s
    # the actual work happens here
    return distribute_work(f, inputs, outputs, nprocs=nprocs)
python
def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
    """Evaluate a function for different parameters, optionally in parallel.

    Parameters
    ----------
    f: function
        function f to evaluate, must take only kw arguments as inputs
    nruns: int
        number of evaluations of f for each set of arguments
    nprocs: int
        + if <=0, set to actual number of physical processors plus nprocs
          (i.e. -1 => number of cpus on your machine minus one)
        Default is 1, which means no multiprocessing
    seeding: bool (default: True if nruns > 1, False otherwise)
        whether we need to provide different seeds for RNGs
    **args:
        keyword arguments for function f.

    Note
    ----
    see documentation of `utils`
    """
    if not callable(f):
        raise ValueError('multiplexer: function f missing, or not callable')
    if seeding is None:
        seeding = (nruns > 1)
    # extra arguments (meant to be arguments for f)
    fixedargs, listargs, dictargs = {}, {}, {}
    listargs['run'] = list(range(nruns))
    for k, v in args.items():
        if isinstance(v, list):
            listargs[k] = v
        elif isinstance(v, dict):
            dictargs[k] = v
        else:
            fixedargs[k] = v
    # cartesian product
    inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
    for ip in inputs:
        ip.pop('run')  # run is not an argument of f, just an id for output
    # distributing different seeds
    if seeding:
        seeds = distinct_seeds(len(inputs))
        for ip, op, s in zip(inputs, outputs, seeds):
            ip['seed'] = s
            op['seed'] = s
    # the actual work happens here
    return distribute_work(f, inputs, outputs, nprocs=nprocs)
[ "def", "multiplexer", "(", "f", "=", "None", ",", "nruns", "=", "1", ",", "nprocs", "=", "1", ",", "seeding", "=", "None", ",", "*", "*", "args", ")", ":", "if", "not", "callable", "(", "f", ")", ":", "raise", "ValueError", "(", "'multiplexer: function f missing, or not callable'", ")", "if", "seeding", "is", "None", ":", "seeding", "=", "(", "nruns", ">", "1", ")", "# extra arguments (meant to be arguments for f)", "fixedargs", ",", "listargs", ",", "dictargs", "=", "{", "}", ",", "{", "}", ",", "{", "}", "listargs", "[", "'run'", "]", "=", "list", "(", "range", "(", "nruns", ")", ")", "for", "k", ",", "v", "in", "args", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "listargs", "[", "k", "]", "=", "v", "elif", "isinstance", "(", "v", ",", "dict", ")", ":", "dictargs", "[", "k", "]", "=", "v", "else", ":", "fixedargs", "[", "k", "]", "=", "v", "# cartesian product", "inputs", ",", "outputs", "=", "cartesian_args", "(", "fixedargs", ",", "listargs", ",", "dictargs", ")", "for", "ip", "in", "inputs", ":", "ip", ".", "pop", "(", "'run'", ")", "# run is not an argument of f, just an id for output", "# distributing different seeds", "if", "seeding", ":", "seeds", "=", "distinct_seeds", "(", "len", "(", "inputs", ")", ")", "for", "ip", ",", "op", ",", "s", "in", "zip", "(", "inputs", ",", "outputs", ",", "seeds", ")", ":", "ip", "[", "'seed'", "]", "=", "s", "op", "[", "'seed'", "]", "=", "s", "# the actual work happens here", "return", "distribute_work", "(", "f", ",", "inputs", ",", "outputs", ",", "nprocs", "=", "nprocs", ")" ]
Evaluate a function for different parameters, optionally in parallel.

    Parameters
    ----------
    f: function
        function f to evaluate, must take only kw arguments as inputs
    nruns: int
        number of evaluations of f for each set of arguments
    nprocs: int
        + if <=0, set to actual number of physical processors plus nprocs
          (i.e. -1 => number of cpus on your machine minus one)
        Default is 1, which means no multiprocessing
    seeding: bool (default: True if nruns > 1, False otherwise)
        whether we need to provide different seeds for RNGs
    **args:
        keyword arguments for function f.

    Note
    ----
    see documentation of `utils`
[ "Evaluate", "a", "function", "for", "different", "parameters", "optionally", "in", "parallel", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L192-L240
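Minimal usage sketch; the function f below is a stand-in, and it must accept a seed keyword because seeding kicks in automatically when nruns > 1:

import numpy as np

def f(N=10, seed=None):
    if seed is not None:
        np.random.seed(seed)
    return np.random.randn(N).mean()

results = multiplexer(f=f, N=[10, 100], nruns=3, nprocs=1)
# 2 values of N x 3 runs = 6 dicts; each records N, the run index, the seed,
# and the value returned by f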
237,983
nchopin/particles
particles/state_space_models.py
StateSpaceModel.simulate
def simulate(self, T): """Simulate state and observation processes. Parameters ---------- T: int processes are simulated from time 0 to time T-1 Returns ------- x, y: lists lists of length T """ x = [] for t in range(T): law_x = self.PX0() if t == 0 else self.PX(t, x[-1]) x.append(law_x.rvs(size=1)) y = self.simulate_given_x(x) return x, y
python
def simulate(self, T): """Simulate state and observation processes. Parameters ---------- T: int processes are simulated from time 0 to time T-1 Returns ------- x, y: lists lists of length T """ x = [] for t in range(T): law_x = self.PX0() if t == 0 else self.PX(t, x[-1]) x.append(law_x.rvs(size=1)) y = self.simulate_given_x(x) return x, y
[ "def", "simulate", "(", "self", ",", "T", ")", ":", "x", "=", "[", "]", "for", "t", "in", "range", "(", "T", ")", ":", "law_x", "=", "self", ".", "PX0", "(", ")", "if", "t", "==", "0", "else", "self", ".", "PX", "(", "t", ",", "x", "[", "-", "1", "]", ")", "x", ".", "append", "(", "law_x", ".", "rvs", "(", "size", "=", "1", ")", ")", "y", "=", "self", ".", "simulate_given_x", "(", "x", ")", "return", "x", ",", "y" ]
Simulate state and observation processes. Parameters ---------- T: int processes are simulated from time 0 to time T-1 Returns ------- x, y: lists lists of length T
[ "Simulate", "state", "and", "observation", "processes", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/state_space_models.py#L280-L298
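Usage sketch, assuming StochVol (with these parameter names) is one of the concrete subclasses defined in this module:

ssm = StochVol(mu=-1., rho=.9, sigma=.3)   # hypothetical parameter values
x, y = ssm.simulate(100)                   # lists of 100 states / observations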
237,984
nchopin/particles
book/mle/malikpitt_interpolation.py
interpoled_resampling
def interpoled_resampling(W, x): """Resampling based on an interpolated CDF, as described in Malik and Pitt. Parameters ---------- W: (N,) array weights x: (N,) array particles Returns ------- xrs: (N,) array the resampled particles """ N = W.shape[0] idx = np.argsort(x) xs = x[idx] ws = W[idx] cs = np.cumsum(avg_n_nplusone(ws)) u = random.rand(N) xrs = np.empty(N) where = np.searchsorted(cs, u) # costs O(N log(N)) but algorithm has O(N log(N)) complexity anyway for n in range(N): m = where[n] if m==0: xrs[n] = xs[0] elif m==N: xrs[n] = xs[-1] else: xrs[n] = interpol(cs[m-1], cs[m], xs[m-1], xs[m], u[n]) return xrs
python
def interpoled_resampling(W, x): """Resampling based on an interpolated CDF, as described in Malik and Pitt. Parameters ---------- W: (N,) array weights x: (N,) array particles Returns ------- xrs: (N,) array the resampled particles """ N = W.shape[0] idx = np.argsort(x) xs = x[idx] ws = W[idx] cs = np.cumsum(avg_n_nplusone(ws)) u = random.rand(N) xrs = np.empty(N) where = np.searchsorted(cs, u) # costs O(N log(N)) but algorithm has O(N log(N)) complexity anyway for n in range(N): m = where[n] if m==0: xrs[n] = xs[0] elif m==N: xrs[n] = xs[-1] else: xrs[n] = interpol(cs[m-1], cs[m], xs[m-1], xs[m], u[n]) return xrs
[ "def", "interpoled_resampling", "(", "W", ",", "x", ")", ":", "N", "=", "W", ".", "shape", "[", "0", "]", "idx", "=", "np", ".", "argsort", "(", "x", ")", "xs", "=", "x", "[", "idx", "]", "ws", "=", "W", "[", "idx", "]", "cs", "=", "np", ".", "cumsum", "(", "avg_n_nplusone", "(", "ws", ")", ")", "u", "=", "random", ".", "rand", "(", "N", ")", "xrs", "=", "np", ".", "empty", "(", "N", ")", "where", "=", "np", ".", "searchsorted", "(", "cs", ",", "u", ")", "# costs O(N log(N)) but algorithm has O(N log(N)) complexity anyway", "for", "n", "in", "range", "(", "N", ")", ":", "m", "=", "where", "[", "n", "]", "if", "m", "==", "0", ":", "xrs", "[", "n", "]", "=", "xs", "[", "0", "]", "elif", "m", "==", "N", ":", "xrs", "[", "n", "]", "=", "xs", "[", "-", "1", "]", "else", ":", "xrs", "[", "n", "]", "=", "interpol", "(", "cs", "[", "m", "-", "1", "]", ",", "cs", "[", "m", "]", ",", "xs", "[", "m", "-", "1", "]", ",", "xs", "[", "m", "]", ",", "u", "[", "n", "]", ")", "return", "xrs" ]
Resampling based on an interpolated CDF, as described in Malik and Pitt.

    Parameters
    ----------
    W: (N,) array
        weights
    x: (N,) array
        particles

    Returns
    -------
    xrs: (N,) array
        the resampled particles
[ "Resampling", "based", "on", "an", "interpolated", "CDF", "as", "described", "in", "Malik", "and", "Pitt", "." ]
3faa97a1073db45c5889eef3e015dd76ef350b52
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/book/mle/malikpitt_interpolation.py#L26-L59
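A hedged usage sketch for the record above. The function depends on the module's own `avg_n_nplusone` and `interpol` helpers, so it is shown as imported from the module at the record's path; the weights and particles below are invented test data.

import numpy as np
from malikpitt_interpolation import interpoled_resampling  # module from the record's path

N = 1000
x = np.random.randn(N)      # particle values
W = np.full(N, 1. / N)      # normalised weights
xrs = interpoled_resampling(W, x)  # smooth resampling: xrs varies continuously with W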
237,985
Fortran-FOSS-Programmers/ford
ford/sourceform.py
sort_items
def sort_items(self,items,args=False):
    """
    Sort the `self`'s contents, as contained in the list `items`
    as specified in `self`'s meta-data.
    """
    if self.settings['sort'].lower() == 'src': return
    def alpha(i):
        return i.name
    def permission(i):
        if args:
            if i.intent == 'in': return 'b'
            if i.intent == 'inout': return 'c'
            if i.intent == 'out': return 'd'
            if i.intent == '': return 'e'
        perm = getattr(i, 'permission', '')
        if perm == 'public': return 'b'
        if perm == 'protected': return 'c'
        if perm == 'private': return 'd'
        return 'a'
    def permission_alpha(i):
        return permission(i) + '-' + i.name
    def itype(i):
        if i.obj == 'variable':
            retstr = i.vartype
            if retstr == 'class': retstr = 'type'
            if i.kind: retstr = retstr + '-' + str(i.kind)
            if i.strlen: retstr = retstr + '-' + str(i.strlen)
            if i.proto: retstr = retstr + '-' + i.proto[0]
            return retstr
        elif i.obj == 'proc':
            if i.proctype != 'Function':
                return i.proctype.lower()
            else:
                return i.proctype.lower() + '-' + itype(i.retvar)
        else:
            return i.obj
    def itype_alpha(i):
        return itype(i) + '-' + i.name
    if self.settings['sort'].lower() == 'alpha':
        items.sort(key=alpha)
    elif self.settings['sort'].lower() == 'permission':
        items.sort(key=permission)
    elif self.settings['sort'].lower() == 'permission-alpha':
        items.sort(key=permission_alpha)
    elif self.settings['sort'].lower() == 'type':
        items.sort(key=itype)
    elif self.settings['sort'].lower() == 'type-alpha':
        items.sort(key=itype_alpha)
python
def sort_items(self,items,args=False):
    """
    Sort the `self`'s contents, as contained in the list `items`
    as specified in `self`'s meta-data.
    """
    if self.settings['sort'].lower() == 'src': return
    def alpha(i):
        return i.name
    def permission(i):
        if args:
            if i.intent == 'in': return 'b'
            if i.intent == 'inout': return 'c'
            if i.intent == 'out': return 'd'
            if i.intent == '': return 'e'
        perm = getattr(i, 'permission', '')
        if perm == 'public': return 'b'
        if perm == 'protected': return 'c'
        if perm == 'private': return 'd'
        return 'a'
    def permission_alpha(i):
        return permission(i) + '-' + i.name
    def itype(i):
        if i.obj == 'variable':
            retstr = i.vartype
            if retstr == 'class': retstr = 'type'
            if i.kind: retstr = retstr + '-' + str(i.kind)
            if i.strlen: retstr = retstr + '-' + str(i.strlen)
            if i.proto: retstr = retstr + '-' + i.proto[0]
            return retstr
        elif i.obj == 'proc':
            if i.proctype != 'Function':
                return i.proctype.lower()
            else:
                return i.proctype.lower() + '-' + itype(i.retvar)
        else:
            return i.obj
    def itype_alpha(i):
        return itype(i) + '-' + i.name
    if self.settings['sort'].lower() == 'alpha':
        items.sort(key=alpha)
    elif self.settings['sort'].lower() == 'permission':
        items.sort(key=permission)
    elif self.settings['sort'].lower() == 'permission-alpha':
        items.sort(key=permission_alpha)
    elif self.settings['sort'].lower() == 'type':
        items.sort(key=itype)
    elif self.settings['sort'].lower() == 'type-alpha':
        items.sort(key=itype_alpha)
[ "def", "sort_items", "(", "self", ",", "items", ",", "args", "=", "False", ")", ":", "if", "self", ".", "settings", "[", "'sort'", "]", ".", "lower", "(", ")", "==", "'src'", ":", "return", "def", "alpha", "(", "i", ")", ":", "return", "i", ".", "name", "def", "permission", "(", "i", ")", ":", "if", "args", ":", "if", "i", ".", "intent", "==", "'in'", ":", "return", "'b'", "if", "i", ".", "intent", "==", "'inout'", ":", "return", "'c'", "if", "i", ".", "intent", "==", "'out'", ":", "return", "'d'", "if", "i", ".", "intent", "==", "''", ":", "return", "'e'", "perm", "=", "getattr", "(", "i", ",", "'permission'", ",", "''", ")", "if", "perm", "==", "'public'", ":", "return", "'b'", "if", "perm", "==", "'protected'", ":", "return", "'c'", "if", "perm", "==", "'private'", ":", "return", "'d'", "return", "'a'", "def", "permission_alpha", "(", "i", ")", ":", "return", "permission", "(", "i", ")", "+", "'-'", "+", "i", ".", "name", "def", "itype", "(", "i", ")", ":", "if", "i", ".", "obj", "==", "'variable'", ":", "retstr", "=", "i", ".", "vartype", "if", "retstr", "==", "'class'", ":", "retstr", "=", "'type'", "if", "i", ".", "kind", ":", "retstr", "=", "retstr", "+", "'-'", "+", "str", "(", "i", ".", "kind", ")", "if", "i", ".", "strlen", ":", "retstr", "=", "retstr", "+", "'-'", "+", "str", "(", "i", ".", "strlen", ")", "if", "i", ".", "proto", ":", "retstr", "=", "retstr", "+", "'-'", "+", "i", ".", "proto", "[", "0", "]", "return", "retstr", "elif", "i", ".", "obj", "==", "'proc'", ":", "if", "i", ".", "proctype", "!=", "'Function'", ":", "return", "i", ".", "proctype", ".", "lower", "(", ")", "else", ":", "return", "i", ".", "proctype", ".", "lower", "(", ")", "+", "'-'", "+", "itype", "(", "i", ".", "retvar", ")", "else", ":", "return", "i", ".", "obj", "def", "itype_alpha", "(", "i", ")", ":", "return", "itype", "(", "i", ")", "+", "'-'", "+", "i", ".", "name", "if", "self", ".", "settings", "[", "'sort'", "]", ".", "lower", "(", ")", "==", "'alpha'", ":", "items", ".", "sort", "(", "key", "=", "alpha", ")", "elif", "self", ".", "settings", "[", "'sort'", "]", ".", "lower", "(", ")", "==", "'permission'", ":", "items", ".", "sort", "(", "key", "=", "permission", ")", "elif", "self", ".", "settings", "[", "'sort'", "]", ".", "lower", "(", ")", "==", "'permission-alpha'", ":", "items", ".", "sort", "(", "key", "=", "permission_alpha", ")", "elif", "self", ".", "settings", "[", "'sort'", "]", ".", "lower", "(", ")", "==", "'type'", ":", "items", ".", "sort", "(", "key", "=", "itype", ")", "elif", "self", ".", "settings", "[", "'sort'", "]", ".", "lower", "(", ")", "==", "'type-alpha'", ":", "items", ".", "sort", "(", "key", "=", "itype_alpha", ")" ]
Sort the `self`'s contents, as contained in the list `items` as specified in `self`'s meta-data.
[ "Sort", "the", "self", "s", "contents", "as", "contained", "in", "the", "list", "items", "as", "specified", "in", "self", "s", "meta", "-", "data", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L2402-L2451
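Because `sort_items` only reads `self.settings['sort']` and the attributes of each item, stand-in objects are enough to exercise it; the two classes below are invented for the demonstration and are not part of FORD.

class FakeSettings:
    settings = {'sort': 'alpha'}

class FakeEntity:
    def __init__(self, name):
        self.name = name

items = [FakeEntity('zeta'), FakeEntity('alpha'), FakeEntity('mid')]
sort_items(FakeSettings(), items)     # sorts in place
print([i.name for i in items])        # ['alpha', 'mid', 'zeta']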
237,986
Fortran-FOSS-Programmers/ford
ford/sourceform.py
FortranBase.contents_size
def contents_size(self):
        '''
        Returns the number of different categories to be shown in the
        contents side-bar in the HTML documentation.
        '''
        count = 0
        if hasattr(self,'variables'): count += 1
        if hasattr(self,'types'): count += 1
        if hasattr(self,'modules'): count += 1
        if hasattr(self,'submodules'): count += 1
        if hasattr(self,'subroutines'): count += 1
        if hasattr(self,'modprocedures'): count += 1
        if hasattr(self,'functions'): count += 1
        if hasattr(self,'interfaces'): count += 1
        if hasattr(self,'absinterfaces'): count += 1
        if hasattr(self,'programs'): count += 1
        if hasattr(self,'boundprocs'): count += 1
        if hasattr(self,'finalprocs'): count += 1
        if hasattr(self,'enums'): count += 1
        if hasattr(self,'procedure'): count += 1
        if hasattr(self,'constructor'): count += 1
        if hasattr(self,'modfunctions'): count += 1
        if hasattr(self,'modsubroutines'): count += 1
        if hasattr(self,'modprocs'): count += 1
        if getattr(self,'src',None): count += 1
        return count
python
def contents_size(self):
        '''
        Returns the number of different categories to be shown in the
        contents side-bar in the HTML documentation.
        '''
        count = 0
        if hasattr(self,'variables'): count += 1
        if hasattr(self,'types'): count += 1
        if hasattr(self,'modules'): count += 1
        if hasattr(self,'submodules'): count += 1
        if hasattr(self,'subroutines'): count += 1
        if hasattr(self,'modprocedures'): count += 1
        if hasattr(self,'functions'): count += 1
        if hasattr(self,'interfaces'): count += 1
        if hasattr(self,'absinterfaces'): count += 1
        if hasattr(self,'programs'): count += 1
        if hasattr(self,'boundprocs'): count += 1
        if hasattr(self,'finalprocs'): count += 1
        if hasattr(self,'enums'): count += 1
        if hasattr(self,'procedure'): count += 1
        if hasattr(self,'constructor'): count += 1
        if hasattr(self,'modfunctions'): count += 1
        if hasattr(self,'modsubroutines'): count += 1
        if hasattr(self,'modprocs'): count += 1
        if getattr(self,'src',None): count += 1
        return count
[ "def", "contents_size", "(", "self", ")", ":", "count", "=", "0", "if", "hasattr", "(", "self", ",", "'variables'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'types'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'modules'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'submodules'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'subroutines'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'modprocedures'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'functions'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'interfaces'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'absinterfaces'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'programs'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'boundprocs'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'finalprocs'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'enums'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'procedure'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'constructor'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'modfunctions'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'modsubroutines'", ")", ":", "count", "+=", "1", "if", "hasattr", "(", "self", ",", "'modprocs'", ")", ":", "count", "+=", "1", "if", "getattr", "(", "self", ",", "'src'", ",", "None", ")", ":", "count", "+=", "1", "return", "count" ]
Returns the number of different categories to be shown in the contents side-bar in the HTML documentation.
[ "Returns", "the", "number", "of", "different", "categories", "to", "be", "shown", "in", "the", "contents", "side", "-", "bar", "in", "the", "HTML", "documentation", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L267-L292
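The method above is a long chain of `hasattr` checks; the same tally can be sketched generically. `count_categories` and its abridged default list are hypothetical, invented only to illustrate the pattern.

def count_categories(obj, categories=('variables', 'types', 'functions', 'interfaces')):
    # one point per category the object actually defines, as contents_size does
    return sum(1 for c in categories if hasattr(obj, c))

class Demo:
    variables = []
    types = []

print(count_categories(Demo()))  # 2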
237,987
Fortran-FOSS-Programmers/ford
ford/sourceform.py
FortranBase.sort
def sort(self):
        '''
        Sorts components of the object.
        '''
        if hasattr(self,'variables'): sort_items(self,self.variables)
        if hasattr(self,'modules'): sort_items(self,self.modules)
        if hasattr(self,'submodules'): sort_items(self,self.submodules)
        if hasattr(self,'common'): sort_items(self,self.common)
        if hasattr(self,'subroutines'): sort_items(self,self.subroutines)
        if hasattr(self,'modprocedures'): sort_items(self,self.modprocedures)
        if hasattr(self,'functions'): sort_items(self,self.functions)
        if hasattr(self,'interfaces'): sort_items(self,self.interfaces)
        if hasattr(self,'absinterfaces'): sort_items(self,self.absinterfaces)
        if hasattr(self,'types'): sort_items(self,self.types)
        if hasattr(self,'programs'): sort_items(self,self.programs)
        if hasattr(self,'blockdata'): sort_items(self,self.blockdata)
        if hasattr(self,'boundprocs'): sort_items(self,self.boundprocs)
        if hasattr(self,'finalprocs'): sort_items(self,self.finalprocs)
        if hasattr(self,'args'):
            #sort_items(self.args,args=True)
            pass
python
def sort(self):
        '''
        Sorts components of the object.
        '''
        if hasattr(self,'variables'): sort_items(self,self.variables)
        if hasattr(self,'modules'): sort_items(self,self.modules)
        if hasattr(self,'submodules'): sort_items(self,self.submodules)
        if hasattr(self,'common'): sort_items(self,self.common)
        if hasattr(self,'subroutines'): sort_items(self,self.subroutines)
        if hasattr(self,'modprocedures'): sort_items(self,self.modprocedures)
        if hasattr(self,'functions'): sort_items(self,self.functions)
        if hasattr(self,'interfaces'): sort_items(self,self.interfaces)
        if hasattr(self,'absinterfaces'): sort_items(self,self.absinterfaces)
        if hasattr(self,'types'): sort_items(self,self.types)
        if hasattr(self,'programs'): sort_items(self,self.programs)
        if hasattr(self,'blockdata'): sort_items(self,self.blockdata)
        if hasattr(self,'boundprocs'): sort_items(self,self.boundprocs)
        if hasattr(self,'finalprocs'): sort_items(self,self.finalprocs)
        if hasattr(self,'args'):
            #sort_items(self.args,args=True)
            pass
[ "def", "sort", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'variables'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "variables", ")", "if", "hasattr", "(", "self", ",", "'modules'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "modules", ")", "if", "hasattr", "(", "self", ",", "'submodules'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "submodules", ")", "if", "hasattr", "(", "self", ",", "'common'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "common", ")", "if", "hasattr", "(", "self", ",", "'subroutines'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "subroutines", ")", "if", "hasattr", "(", "self", ",", "'modprocedures'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "modprocedures", ")", "if", "hasattr", "(", "self", ",", "'functions'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "functions", ")", "if", "hasattr", "(", "self", ",", "'interfaces'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "interfaces", ")", "if", "hasattr", "(", "self", ",", "'absinterfaces'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "absinterfaces", ")", "if", "hasattr", "(", "self", ",", "'types'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "types", ")", "if", "hasattr", "(", "self", ",", "'programs'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "programs", ")", "if", "hasattr", "(", "self", ",", "'blockdata'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "blockdata", ")", "if", "hasattr", "(", "self", ",", "'boundprocs'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "boundprocs", ")", "if", "hasattr", "(", "self", ",", "'finalprocs'", ")", ":", "sort_items", "(", "self", ",", "self", ".", "finalprocs", ")", "if", "hasattr", "(", "self", ",", "'args'", ")", ":", "#sort_items(self.args,args=True)", "pass" ]
Sorts components of the object.
[ "Sorts", "components", "of", "the", "object", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L417-L451
237,988
Fortran-FOSS-Programmers/ford
ford/sourceform.py
FortranBase.make_links
def make_links(self, project):
        """
        Process intra-site links to documentation of other parts of the program.
        """
        self.doc = ford.utils.sub_links(self.doc,project)
        if 'summary' in self.meta:
            self.meta['summary'] = ford.utils.sub_links(self.meta['summary'],project)
        # Create links in the project
        for item in self.iterator('variables', 'types', 'enums', 'modules',
                                  'submodules', 'subroutines', 'functions',
                                  'interfaces', 'absinterfaces', 'programs',
                                  'boundprocs', 'args', 'bindings'):
            if isinstance(item, FortranBase):
                item.make_links(project)
        if hasattr(self, 'retvar'):
            if self.retvar:
                if isinstance(self.retvar, FortranBase):
                    self.retvar.make_links(project)
        if hasattr(self, 'procedure'):
            if isinstance(self.procedure, FortranBase):
                self.procedure.make_links(project)
python
def make_links(self, project):
        """
        Process intra-site links to documentation of other parts of the program.
        """
        self.doc = ford.utils.sub_links(self.doc,project)
        if 'summary' in self.meta:
            self.meta['summary'] = ford.utils.sub_links(self.meta['summary'],project)
        # Create links in the project
        for item in self.iterator('variables', 'types', 'enums', 'modules',
                                  'submodules', 'subroutines', 'functions',
                                  'interfaces', 'absinterfaces', 'programs',
                                  'boundprocs', 'args', 'bindings'):
            if isinstance(item, FortranBase):
                item.make_links(project)
        if hasattr(self, 'retvar'):
            if self.retvar:
                if isinstance(self.retvar, FortranBase):
                    self.retvar.make_links(project)
        if hasattr(self, 'procedure'):
            if isinstance(self.procedure, FortranBase):
                self.procedure.make_links(project)
[ "def", "make_links", "(", "self", ",", "project", ")", ":", "self", ".", "doc", "=", "ford", ".", "utils", ".", "sub_links", "(", "self", ".", "doc", ",", "project", ")", "if", "'summary'", "in", "self", ".", "meta", ":", "self", ".", "meta", "[", "'summary'", "]", "=", "ford", ".", "utils", ".", "sub_links", "(", "self", ".", "meta", "[", "'summary'", "]", ",", "project", ")", "# Create links in the project", "for", "item", "in", "self", ".", "iterator", "(", "'variables'", ",", "'types'", ",", "'enums'", ",", "'modules'", ",", "'submodules'", ",", "'subroutines'", ",", "'functions'", ",", "'interfaces'", ",", "'absinterfaces'", ",", "'programs'", ",", "'boundprocs'", ",", "'args'", ",", "'bindings'", ")", ":", "if", "isinstance", "(", "item", ",", "FortranBase", ")", ":", "item", ".", "make_links", "(", "project", ")", "if", "hasattr", "(", "self", ",", "'retvar'", ")", ":", "if", "self", ".", "retvar", ":", "if", "isinstance", "(", "self", ".", "retvar", ",", "FortranBase", ")", ":", "self", ".", "retvar", ".", "make_links", "(", "project", ")", "if", "hasattr", "(", "self", ",", "'procedure'", ")", ":", "if", "isinstance", "(", "self", ".", "procedure", ",", "FortranBase", ")", ":", "self", ".", "procedure", ".", "make_links", "(", "project", ")" ]
Process intra-site links to documentation of other parts of the program.
[ "Process", "intra", "-", "site", "links", "to", "documentation", "of", "other", "parts", "of", "the", "program", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L454-L475
237,989
Fortran-FOSS-Programmers/ford
ford/sourceform.py
FortranBase.iterator
def iterator(self, *argv):
        """
        Iterator returning any list of elements via attribute lookup in `self`

        This iterator retains the order of the arguments
        """
        for arg in argv:
            if hasattr(self, arg):
                for item in getattr(self, arg):
                    yield item
python
def iterator(self, *argv):
        """
        Iterator returning any list of elements via attribute lookup in `self`

        This iterator retains the order of the arguments
        """
        for arg in argv:
            if hasattr(self, arg):
                for item in getattr(self, arg):
                    yield item
[ "def", "iterator", "(", "self", ",", "*", "argv", ")", ":", "for", "arg", "in", "argv", ":", "if", "hasattr", "(", "self", ",", "arg", ")", ":", "for", "item", "in", "getattr", "(", "self", ",", "arg", ")", ":", "yield", "item" ]
Iterator returning any list of elements via attribute lookup in `self`

        This iterator retains the order of the arguments
[ "Iterator", "returning", "any", "list", "of", "elements", "via", "attribute", "lookup", "in", "self" ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L486-L493
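Stripped of its class, the pattern behind `iterator` is a chained attribute walk. This standalone copy (with invented names) behaves the same way and is runnable as-is:

def iter_attrs(obj, *argv):
    # yield the elements of each listed attribute, in argument order
    for arg in argv:
        if hasattr(obj, arg):
            for item in getattr(obj, arg):
                yield item

class Box:
    variables = ['a', 'b']
    functions = ['f']

print(list(iter_attrs(Box(), 'variables', 'functions', 'types')))  # ['a', 'b', 'f']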
237,990
Fortran-FOSS-Programmers/ford
ford/sourceform.py
FortranModule.get_used_entities
def get_used_entities(self,use_specs):
        """
        Returns the entities which are imported by a use statement. These
        are contained in dicts.
        """
        if len(use_specs.strip()) == 0:
            return (self.pub_procs, self.pub_absints, self.pub_types, self.pub_vars)
        only = bool(self.ONLY_RE.match(use_specs))
        use_specs = self.ONLY_RE.sub('',use_specs)
        ulist = self.SPLIT_RE.split(use_specs)
        ulist[-1] = ulist[-1].strip()
        uspecs = {}
        for item in ulist:
            match = self.RENAME_RE.search(item)
            if match:
                uspecs[match.group(1).lower()] = match.group(2)
            else:
                uspecs[item.lower()] = item
        ret_procs = {}
        ret_absints = {}
        ret_types = {}
        ret_vars = {}
        for name, obj in self.pub_procs.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_procs[name] = obj
            else:
                ret_procs[name] = obj
        for name, obj in self.pub_absints.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_absints[name] = obj
            else:
                ret_absints[name] = obj
        for name, obj in self.pub_types.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_types[name] = obj
            else:
                ret_types[name] = obj
        for name, obj in self.pub_vars.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_vars[name] = obj
            else:
                ret_vars[name] = obj
        return (ret_procs,ret_absints,ret_types,ret_vars)
python
def get_used_entities(self,use_specs):
        """
        Returns the entities which are imported by a use statement. These
        are contained in dicts.
        """
        if len(use_specs.strip()) == 0:
            return (self.pub_procs, self.pub_absints, self.pub_types, self.pub_vars)
        only = bool(self.ONLY_RE.match(use_specs))
        use_specs = self.ONLY_RE.sub('',use_specs)
        ulist = self.SPLIT_RE.split(use_specs)
        ulist[-1] = ulist[-1].strip()
        uspecs = {}
        for item in ulist:
            match = self.RENAME_RE.search(item)
            if match:
                uspecs[match.group(1).lower()] = match.group(2)
            else:
                uspecs[item.lower()] = item
        ret_procs = {}
        ret_absints = {}
        ret_types = {}
        ret_vars = {}
        for name, obj in self.pub_procs.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_procs[name] = obj
            else:
                ret_procs[name] = obj
        for name, obj in self.pub_absints.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_absints[name] = obj
            else:
                ret_absints[name] = obj
        for name, obj in self.pub_types.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_types[name] = obj
            else:
                ret_types[name] = obj
        for name, obj in self.pub_vars.items():
            name = name.lower()
            if only:
                if name in uspecs: ret_vars[name] = obj
            else:
                ret_vars[name] = obj
        return (ret_procs,ret_absints,ret_types,ret_vars)
[ "def", "get_used_entities", "(", "self", ",", "use_specs", ")", ":", "if", "len", "(", "use_specs", ".", "strip", "(", ")", ")", "==", "0", ":", "return", "(", "self", ".", "pub_procs", ",", "self", ".", "pub_absints", ",", "self", ".", "pub_types", ",", "self", ".", "pub_vars", ")", "only", "=", "bool", "(", "self", ".", "ONLY_RE", ".", "match", "(", "use_specs", ")", ")", "use_specs", "=", "self", ".", "ONLY_RE", ".", "sub", "(", "''", ",", "use_specs", ")", "ulist", "=", "self", ".", "SPLIT_RE", ".", "split", "(", "use_specs", ")", "ulist", "[", "-", "1", "]", "=", "ulist", "[", "-", "1", "]", ".", "strip", "(", ")", "uspecs", "=", "{", "}", "for", "item", "in", "ulist", ":", "match", "=", "self", ".", "RENAME_RE", ".", "search", "(", "item", ")", "if", "match", ":", "uspecs", "[", "match", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "]", "=", "match", ".", "group", "(", "2", ")", "else", ":", "uspecs", "[", "item", ".", "lower", "(", ")", "]", "=", "item", "ret_procs", "=", "{", "}", "ret_absints", "=", "{", "}", "ret_types", "=", "{", "}", "ret_vars", "=", "{", "}", "for", "name", ",", "obj", "in", "self", ".", "pub_procs", ".", "items", "(", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "only", ":", "if", "name", "in", "uspecs", ":", "ret_procs", "[", "name", "]", "=", "obj", "else", ":", "ret_procs", "[", "name", "]", "=", "obj", "for", "name", ",", "obj", "in", "self", ".", "pub_absints", ".", "items", "(", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "only", ":", "if", "name", "in", "uspecs", ":", "ret_absints", "[", "name", "]", "=", "obj", "else", ":", "ret_absints", "[", "name", "]", "=", "obj", "for", "name", ",", "obj", "in", "self", ".", "pub_types", ".", "items", "(", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "only", ":", "if", "name", "in", "uspecs", ":", "ret_types", "[", "name", "]", "=", "obj", "else", ":", "ret_types", "[", "name", "]", "=", "obj", "for", "name", ",", "obj", "in", "self", ".", "pub_vars", ".", "items", "(", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "only", ":", "if", "name", "in", "uspecs", ":", "ret_vars", "[", "name", "]", "=", "obj", "else", ":", "ret_vars", "[", "name", "]", "=", "obj", "return", "(", "ret_procs", ",", "ret_absints", ",", "ret_types", ",", "ret_vars", ")" ]
Returns the entities which are imported by a use statement. These are contained in dicts.
[ "Returns", "the", "entities", "which", "are", "imported", "by", "a", "use", "statement", ".", "These", "are", "contained", "in", "dicts", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L1138-L1188
237,991
Fortran-FOSS-Programmers/ford
ford/sourceform.py
NameSelector.get_name
def get_name(self,item):
        """
        Return the name for this item registered with this NameSelector.
        If no name has previously been registered, then generate a new one.
        """
        if not isinstance(item,ford.sourceform.FortranBase):
            raise Exception('{} is not of a type derived from FortranBase'.format(str(item)))
        if item in self._items:
            return self._items[item]
        else:
            if item.get_dir() not in self._counts:
                self._counts[item.get_dir()] = {}
            if item.name in self._counts[item.get_dir()]:
                num = self._counts[item.get_dir()][item.name] + 1
            else:
                num = 1
            self._counts[item.get_dir()][item.name] = num
            name = item.name.lower().replace('<','lt') # name is already lower
            name = name.replace('>','gt')
            name = name.replace('/','SLASH')
            if name == '': name = '__unnamed__'
            if num > 1:
                name = name + '~' + str(num)
            self._items[item] = name
            return name
python
def get_name(self,item):
        """
        Return the name for this item registered with this NameSelector.
        If no name has previously been registered, then generate a new one.
        """
        if not isinstance(item,ford.sourceform.FortranBase):
            raise Exception('{} is not of a type derived from FortranBase'.format(str(item)))
        if item in self._items:
            return self._items[item]
        else:
            if item.get_dir() not in self._counts:
                self._counts[item.get_dir()] = {}
            if item.name in self._counts[item.get_dir()]:
                num = self._counts[item.get_dir()][item.name] + 1
            else:
                num = 1
            self._counts[item.get_dir()][item.name] = num
            name = item.name.lower().replace('<','lt') # name is already lower
            name = name.replace('>','gt')
            name = name.replace('/','SLASH')
            if name == '': name = '__unnamed__'
            if num > 1:
                name = name + '~' + str(num)
            self._items[item] = name
            return name
[ "def", "get_name", "(", "self", ",", "item", ")", ":", "if", "not", "isinstance", "(", "item", ",", "ford", ".", "sourceform", ".", "FortranBase", ")", ":", "raise", "Exception", "(", "'{} is not of a type derived from FortranBase'", ".", "format", "(", "str", "(", "item", ")", ")", ")", "if", "item", "in", "self", ".", "_items", ":", "return", "self", ".", "_items", "[", "item", "]", "else", ":", "if", "item", ".", "get_dir", "(", ")", "not", "in", "self", ".", "_counts", ":", "self", ".", "_counts", "[", "item", ".", "get_dir", "(", ")", "]", "=", "{", "}", "if", "item", ".", "name", "in", "self", ".", "_counts", "[", "item", ".", "get_dir", "(", ")", "]", ":", "num", "=", "self", ".", "_counts", "[", "item", ".", "get_dir", "(", ")", "]", "[", "item", ".", "name", "]", "+", "1", "else", ":", "num", "=", "1", "self", ".", "_counts", "[", "item", ".", "get_dir", "(", ")", "]", "[", "item", ".", "name", "]", "=", "num", "name", "=", "item", ".", "name", ".", "lower", "(", ")", ".", "replace", "(", "'<'", ",", "'lt'", ")", "# name is already lower", "name", "=", "name", ".", "replace", "(", "'>'", ",", "'gt'", ")", "name", "=", "name", ".", "replace", "(", "'/'", ",", "'SLASH'", ")", "if", "name", "==", "''", ":", "name", "=", "'__unnamed__'", "if", "num", ">", "1", ":", "name", "=", "name", "+", "'~'", "+", "str", "(", "num", ")", "self", ".", "_items", "[", "item", "]", "=", "name", "return", "name" ]
Return the name for this item registered with this NameSelector. If no name has previously been registered, then generate a new one.
[ "Return", "the", "name", "for", "this", "item", "registered", "with", "this", "NameSelector", ".", "If", "no", "name", "has", "previously", "been", "registered", "then", "generate", "a", "new", "one", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L2466-L2493
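The web-safe name mangling inside `get_name` can be previewed on its own; this snippet simply repeats the `replace` chain from the record on a sample identifier:

name = 'operator(/)'.lower()
name = name.replace('<', 'lt').replace('>', 'gt').replace('/', 'SLASH')
print(name)   # 'operator(SLASH)'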
237,992
Fortran-FOSS-Programmers/ford
ford/__init__.py
main
def main(proj_data,proj_docs,md):
    """
    Main driver of FORD.
    """
    if proj_data['relative']: proj_data['project_url'] = '.'
    # Parse the files in your project
    project = ford.fortran_project.Project(proj_data)
    if len(project.files) < 1:
        print("Error: No source files with appropriate extension found in specified directory.")
        sys.exit(1)
    # Convert the documentation from Markdown to HTML. Make sure to properly
    # handle LaTeX and metadata.
    if proj_data['relative']:
        project.markdown(md,'..')
    else:
        project.markdown(md,proj_data['project_url'])
    project.correlate()
    if proj_data['relative']:
        project.make_links('..')
    else:
        project.make_links(proj_data['project_url'])
    # Convert summaries and descriptions to HTML
    if proj_data['relative']: ford.sourceform.set_base_url('.')
    if 'summary' in proj_data:
        proj_data['summary'] = md.convert(proj_data['summary'])
        proj_data['summary'] = ford.utils.sub_links(ford.utils.sub_macros(ford.utils.sub_notes(proj_data['summary']),proj_data['project_url']),project)
    if 'author_description' in proj_data:
        proj_data['author_description'] = md.convert(proj_data['author_description'])
        proj_data['author_description'] = ford.utils.sub_links(ford.utils.sub_macros(ford.utils.sub_notes(proj_data['author_description']),proj_data['project_url']),project)
    proj_docs_ = ford.utils.sub_links(ford.utils.sub_macros(ford.utils.sub_notes(proj_docs),proj_data['project_url']),project)
    # Process any pages
    if 'page_dir' in proj_data:
        page_tree = ford.pagetree.get_page_tree(os.path.normpath(proj_data['page_dir']),md)
        print()
    else:
        page_tree = None
    proj_data['pages'] = page_tree
    # Produce the documentation using Jinja2. Output it to the desired location
    # and copy any files that are needed (CSS, JS, images, fonts, source files,
    # etc.)
    docs = ford.output.Documentation(proj_data,proj_docs_,project,page_tree)
    docs.writeout()
    print('')
    return 0
python
def main(proj_data,proj_docs,md):
    """
    Main driver of FORD.
    """
    if proj_data['relative']: proj_data['project_url'] = '.'
    # Parse the files in your project
    project = ford.fortran_project.Project(proj_data)
    if len(project.files) < 1:
        print("Error: No source files with appropriate extension found in specified directory.")
        sys.exit(1)
    # Convert the documentation from Markdown to HTML. Make sure to properly
    # handle LaTeX and metadata.
    if proj_data['relative']:
        project.markdown(md,'..')
    else:
        project.markdown(md,proj_data['project_url'])
    project.correlate()
    if proj_data['relative']:
        project.make_links('..')
    else:
        project.make_links(proj_data['project_url'])
    # Convert summaries and descriptions to HTML
    if proj_data['relative']: ford.sourceform.set_base_url('.')
    if 'summary' in proj_data:
        proj_data['summary'] = md.convert(proj_data['summary'])
        proj_data['summary'] = ford.utils.sub_links(ford.utils.sub_macros(ford.utils.sub_notes(proj_data['summary']),proj_data['project_url']),project)
    if 'author_description' in proj_data:
        proj_data['author_description'] = md.convert(proj_data['author_description'])
        proj_data['author_description'] = ford.utils.sub_links(ford.utils.sub_macros(ford.utils.sub_notes(proj_data['author_description']),proj_data['project_url']),project)
    proj_docs_ = ford.utils.sub_links(ford.utils.sub_macros(ford.utils.sub_notes(proj_docs),proj_data['project_url']),project)
    # Process any pages
    if 'page_dir' in proj_data:
        page_tree = ford.pagetree.get_page_tree(os.path.normpath(proj_data['page_dir']),md)
        print()
    else:
        page_tree = None
    proj_data['pages'] = page_tree
    # Produce the documentation using Jinja2. Output it to the desired location
    # and copy any files that are needed (CSS, JS, images, fonts, source files,
    # etc.)
    docs = ford.output.Documentation(proj_data,proj_docs_,project,page_tree)
    docs.writeout()
    print('')
    return 0
[ "def", "main", "(", "proj_data", ",", "proj_docs", ",", "md", ")", ":", "if", "proj_data", "[", "'relative'", "]", ":", "proj_data", "[", "'project_url'", "]", "=", "'.'", "# Parse the files in your project", "project", "=", "ford", ".", "fortran_project", ".", "Project", "(", "proj_data", ")", "if", "len", "(", "project", ".", "files", ")", "<", "1", ":", "print", "(", "\"Error: No source files with appropriate extension found in specified directory.\"", ")", "sys", ".", "exit", "(", "1", ")", "# Convert the documentation from Markdown to HTML. Make sure to properly", "# handle LateX and metadata.", "if", "proj_data", "[", "'relative'", "]", ":", "project", ".", "markdown", "(", "md", ",", "'..'", ")", "else", ":", "project", ".", "markdown", "(", "md", ",", "proj_data", "[", "'project_url'", "]", ")", "project", ".", "correlate", "(", ")", "if", "proj_data", "[", "'relative'", "]", ":", "project", ".", "make_links", "(", "'..'", ")", "else", ":", "project", ".", "make_links", "(", "proj_data", "[", "'project_url'", "]", ")", "# Convert summaries and descriptions to HTML", "if", "proj_data", "[", "'relative'", "]", ":", "ford", ".", "sourceform", ".", "set_base_url", "(", "'.'", ")", "if", "'summary'", "in", "proj_data", ":", "proj_data", "[", "'summary'", "]", "=", "md", ".", "convert", "(", "proj_data", "[", "'summary'", "]", ")", "proj_data", "[", "'summary'", "]", "=", "ford", ".", "utils", ".", "sub_links", "(", "ford", ".", "utils", ".", "sub_macros", "(", "ford", ".", "utils", ".", "sub_notes", "(", "proj_data", "[", "'summary'", "]", ")", ",", "proj_data", "[", "'project_url'", "]", ")", ",", "project", ")", "if", "'author_description'", "in", "proj_data", ":", "proj_data", "[", "'author_description'", "]", "=", "md", ".", "convert", "(", "proj_data", "[", "'author_description'", "]", ")", "proj_data", "[", "'author_description'", "]", "=", "ford", ".", "utils", ".", "sub_links", "(", "ford", ".", "utils", ".", "sub_macros", "(", "ford", ".", "utils", ".", "sub_notes", "(", "proj_data", "[", "'author_description'", "]", ")", ",", "proj_data", "[", "'project_url'", "]", ")", ",", "project", ")", "proj_docs_", "=", "ford", ".", "utils", ".", "sub_links", "(", "ford", ".", "utils", ".", "sub_macros", "(", "ford", ".", "utils", ".", "sub_notes", "(", "proj_docs", ")", ",", "proj_data", "[", "'project_url'", "]", ")", ",", "project", ")", "# Process any pages", "if", "'page_dir'", "in", "proj_data", ":", "page_tree", "=", "ford", ".", "pagetree", ".", "get_page_tree", "(", "os", ".", "path", ".", "normpath", "(", "proj_data", "[", "'page_dir'", "]", ")", ",", "md", ")", "print", "(", ")", "else", ":", "page_tree", "=", "None", "proj_data", "[", "'pages'", "]", "=", "page_tree", "# Produce the documentation using Jinja2. Output it to the desired location", "# and copy any files that are needed (CSS, JS, images, fonts, source files,", "# etc.)", "docs", "=", "ford", ".", "output", ".", "Documentation", "(", "proj_data", ",", "proj_docs_", ",", "project", ",", "page_tree", ")", "docs", ".", "writeout", "(", ")", "print", "(", "''", ")", "return", "0" ]
Main driver of FORD.
[ "Main", "driver", "of", "FORD", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/__init__.py#L337-L382
237,993
Fortran-FOSS-Programmers/ford
ford/fixed2free2.py
convertToFree
def convertToFree(stream, length_limit=True):
    """Convert stream from fixed source form to free source form."""
    linestack = []
    for line in stream:
        convline = FortranLine(line, length_limit)
        if convline.is_regular:
            if convline.isContinuation and linestack:
                linestack[0].continueLine()
            for l in linestack:
                yield str(l)
            linestack = []
        linestack.append(convline)
    for l in linestack:
        yield str(l)
python
def convertToFree(stream, length_limit=True):
    """Convert stream from fixed source form to free source form."""
    linestack = []
    for line in stream:
        convline = FortranLine(line, length_limit)
        if convline.is_regular:
            if convline.isContinuation and linestack:
                linestack[0].continueLine()
            for l in linestack:
                yield str(l)
            linestack = []
        linestack.append(convline)
    for l in linestack:
        yield str(l)
[ "def", "convertToFree", "(", "stream", ",", "length_limit", "=", "True", ")", ":", "linestack", "=", "[", "]", "for", "line", "in", "stream", ":", "convline", "=", "FortranLine", "(", "line", ",", "length_limit", ")", "if", "convline", ".", "is_regular", ":", "if", "convline", ".", "isContinuation", "and", "linestack", ":", "linestack", "[", "0", "]", ".", "continueLine", "(", ")", "for", "l", "in", "linestack", ":", "yield", "str", "(", "l", ")", "linestack", "=", "[", "]", "linestack", ".", "append", "(", "convline", ")", "for", "l", "in", "linestack", ":", "yield", "str", "(", "l", ")" ]
Convert stream from fixed source form to free source form.
[ "Convert", "stream", "from", "fixed", "source", "form", "to", "free", "source", "form", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/fixed2free2.py#L110-L127
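A hedged sketch of driving `convertToFree` with a few fixed-form lines. The import path follows the record's path field, and the input lines are invented; in fixed form, a non-blank character in column 6 marks a continuation line.

from ford.fixed2free2 import convertToFree

fixed_src = [
    "C     a fixed-form comment\n",
    "      X = 1.0\n",
    "     &  + 2.0\n",   # column-6 marker: continuation of the line above
]
print(''.join(convertToFree(fixed_src)))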
237,994
Fortran-FOSS-Programmers/ford
ford/fixed2free2.py
FortranLine.continueLine
def continueLine(self):
        """Insert line continuation symbol at end of line."""
        if not (self.isLong and self.is_regular):
            self.line_conv = self.line_conv.rstrip() + " &\n"
        else:
            temp = self.line_conv[:72].rstrip() + " &"
            self.line_conv = temp.ljust(72) + self.excess_line
python
def continueLine(self):
        """Insert line continuation symbol at end of line."""
        if not (self.isLong and self.is_regular):
            self.line_conv = self.line_conv.rstrip() + " &\n"
        else:
            temp = self.line_conv[:72].rstrip() + " &"
            self.line_conv = temp.ljust(72) + self.excess_line
[ "def", "continueLine", "(", "self", ")", ":", "if", "not", "(", "self", ".", "isLong", "and", "self", ".", "is_regular", ")", ":", "self", ".", "line_conv", "=", "self", ".", "line_conv", ".", "rstrip", "(", ")", "+", "\" &\\n\"", "else", ":", "temp", "=", "self", ".", "line_conv", "[", ":", "72", "]", ".", "rstrip", "(", ")", "+", "\" &\"", "self", ".", "line_conv", "=", "temp", ".", "ljust", "(", "72", ")", "+", "self", ".", "excess_line" ]
Insert line continuation symbol at end of line.
[ "Insert", "line", "continuation", "symbol", "at", "end", "of", "line", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/fixed2free2.py#L52-L59
237,995
Fortran-FOSS-Programmers/ford
ford/fortran_project.py
id_mods
def id_mods(obj,modlist,intrinsic_mods={},submodlist=[]):
    """
    Match USE statements up with the right modules
    """
    for i in range(len(obj.uses)):
        for candidate in modlist:
            if obj.uses[i][0].lower() == candidate.name.lower():
                obj.uses[i] = [candidate, obj.uses[i][1]]
                break
        else:
            if obj.uses[i][0].lower() in intrinsic_mods:
                obj.uses[i] = [intrinsic_mods[obj.uses[i][0].lower()], obj.uses[i][1]]
                continue
    if getattr(obj,'ancestor',None):
        for submod in submodlist:
            if obj.ancestor.lower() == submod.name.lower():
                obj.ancestor = submod
                break
    if hasattr(obj,'ancestor_mod'):
        for mod in modlist:
            if obj.ancestor_mod.lower() == mod.name.lower():
                obj.ancestor_mod = mod
                break
    for modproc in getattr(obj,'modprocedures',[]):
        id_mods(modproc,modlist,intrinsic_mods)
    for func in getattr(obj,'functions',[]):
        id_mods(func,modlist,intrinsic_mods)
    for subroutine in getattr(obj,'subroutines',[]):
        id_mods(subroutine,modlist,intrinsic_mods)
python
def id_mods(obj,modlist,intrinsic_mods={},submodlist=[]):
    """
    Match USE statements up with the right modules
    """
    for i in range(len(obj.uses)):
        for candidate in modlist:
            if obj.uses[i][0].lower() == candidate.name.lower():
                obj.uses[i] = [candidate, obj.uses[i][1]]
                break
        else:
            if obj.uses[i][0].lower() in intrinsic_mods:
                obj.uses[i] = [intrinsic_mods[obj.uses[i][0].lower()], obj.uses[i][1]]
                continue
    if getattr(obj,'ancestor',None):
        for submod in submodlist:
            if obj.ancestor.lower() == submod.name.lower():
                obj.ancestor = submod
                break
    if hasattr(obj,'ancestor_mod'):
        for mod in modlist:
            if obj.ancestor_mod.lower() == mod.name.lower():
                obj.ancestor_mod = mod
                break
    for modproc in getattr(obj,'modprocedures',[]):
        id_mods(modproc,modlist,intrinsic_mods)
    for func in getattr(obj,'functions',[]):
        id_mods(func,modlist,intrinsic_mods)
    for subroutine in getattr(obj,'subroutines',[]):
        id_mods(subroutine,modlist,intrinsic_mods)
[ "def", "id_mods", "(", "obj", ",", "modlist", ",", "intrinsic_mods", "=", "{", "}", ",", "submodlist", "=", "[", "]", ")", ":", "for", "i", "in", "range", "(", "len", "(", "obj", ".", "uses", ")", ")", ":", "for", "candidate", "in", "modlist", ":", "if", "obj", ".", "uses", "[", "i", "]", "[", "0", "]", ".", "lower", "(", ")", "==", "candidate", ".", "name", ".", "lower", "(", ")", ":", "obj", ".", "uses", "[", "i", "]", "=", "[", "candidate", ",", "obj", ".", "uses", "[", "i", "]", "[", "1", "]", "]", "break", "else", ":", "if", "obj", ".", "uses", "[", "i", "]", "[", "0", "]", ".", "lower", "(", ")", "in", "intrinsic_mods", ":", "obj", ".", "uses", "[", "i", "]", "=", "[", "intrinsic_mods", "[", "obj", ".", "uses", "[", "i", "]", "[", "0", "]", ".", "lower", "(", ")", "]", ",", "obj", ".", "uses", "[", "i", "]", "[", "1", "]", "]", "continue", "if", "getattr", "(", "obj", ",", "'ancestor'", ",", "None", ")", ":", "for", "submod", "in", "submodlist", ":", "if", "obj", ".", "ancestor", ".", "lower", "(", ")", "==", "submod", ".", "name", ".", "lower", "(", ")", ":", "obj", ".", "ancestor", "=", "submod", "break", "if", "hasattr", "(", "obj", ",", "'ancestor_mod'", ")", ":", "for", "mod", "in", "modlist", ":", "if", "obj", ".", "ancestor_mod", ".", "lower", "(", ")", "==", "mod", ".", "name", ".", "lower", "(", ")", ":", "obj", ".", "ancestor_mod", "=", "mod", "break", "for", "modproc", "in", "getattr", "(", "obj", ",", "'modprocedures'", ",", "[", "]", ")", ":", "id_mods", "(", "modproc", ",", "modlist", ",", "intrinsic_mods", ")", "for", "func", "in", "getattr", "(", "obj", ",", "'functions'", ",", "[", "]", ")", ":", "id_mods", "(", "func", ",", "modlist", ",", "intrinsic_mods", ")", "for", "subroutine", "in", "getattr", "(", "obj", ",", "'subroutines'", ",", "[", "]", ")", ":", "id_mods", "(", "subroutine", ",", "modlist", ",", "intrinsic_mods", ")" ]
Match USE statements up with the right modules
[ "Match", "USE", "statements", "up", "with", "the", "right", "modules" ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/fortran_project.py#L338-L366
237,996
Fortran-FOSS-Programmers/ford
ford/fortran_project.py
Project.allfiles
def allfiles(self):
        """ Instead of duplicating files, it is much more efficient to create
        the iterator on the fly """
        for f in self.files:
            yield f
        for f in self.extra_files:
            yield f
python
def allfiles(self):
        """ Instead of duplicating files, it is much more efficient to create
        the iterator on the fly """
        for f in self.files:
            yield f
        for f in self.extra_files:
            yield f
[ "def", "allfiles", "(", "self", ")", ":", "for", "f", "in", "self", ".", "files", ":", "yield", "f", "for", "f", "in", "self", ".", "extra_files", ":", "yield", "f" ]
Instead of duplicating files, it is much more efficient to create the iterator on the fly
[ "Instead", "of", "duplicating", "files", "it", "is", "much", "more", "efficient", "to", "create", "the", "itterator", "on", "the", "fly" ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/fortran_project.py#L124-L129
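As a design note, the hand-written generator above is equivalent to chaining the two lists; a sketch of the same property using the standard library (not FORD's actual code):

from itertools import chain

def allfiles(self):
    # same iteration order as the record above, deferring to itertools
    return chain(self.files, self.extra_files)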
237,997
Fortran-FOSS-Programmers/ford
ford/fortran_project.py
Project.make_links
def make_links(self,base_url='..'):
        """
        Substitute intrasite links to documentation for other parts of the program.
        """
        ford.sourceform.set_base_url(base_url)
        for src in self.allfiles:
            src.make_links(self)
python
def make_links(self,base_url='..'):
        """
        Substitute intrasite links to documentation for other parts of the program.
        """
        ford.sourceform.set_base_url(base_url)
        for src in self.allfiles:
            src.make_links(self)
[ "def", "make_links", "(", "self", ",", "base_url", "=", "'..'", ")", ":", "ford", ".", "sourceform", ".", "set_base_url", "(", "base_url", ")", "for", "src", "in", "self", ".", "allfiles", ":", "src", ".", "make_links", "(", "self", ")" ]
Substitute intrasite links to documentation for other parts of the program.
[ "Substitute", "intrasite", "links", "to", "documentation", "for", "other", "parts", "of", "the", "program", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/fortran_project.py#L305-L312
237,998
Fortran-FOSS-Programmers/ford
ford/utils.py
sub_notes
def sub_notes(docs):
    """
    Substitutes the special controls for notes, warnings, todos, and bugs with
    the corresponding div.
    """
    def substitute(match):
        ret = "</p><div class=\"alert alert-{}\" role=\"alert\"><h4>{}</h4>" \
              "<p>{}</p></div>".format(NOTE_TYPE[match.group(1).lower()],
                                       match.group(1).capitalize(),
                                       match.group(2))
        if len(match.groups()) >= 4 and not match.group(4):
            ret += '\n<p>'
        return ret
    for regex in NOTE_RE:
        docs = regex.sub(substitute,docs)
    return docs
python
def sub_notes(docs):
    """
    Substitutes the special controls for notes, warnings, todos, and bugs with
    the corresponding div.
    """
    def substitute(match):
        ret = "</p><div class=\"alert alert-{}\" role=\"alert\"><h4>{}</h4>" \
              "<p>{}</p></div>".format(NOTE_TYPE[match.group(1).lower()],
                                       match.group(1).capitalize(),
                                       match.group(2))
        if len(match.groups()) >= 4 and not match.group(4):
            ret += '\n<p>'
        return ret
    for regex in NOTE_RE:
        docs = regex.sub(substitute,docs)
    return docs
[ "def", "sub_notes", "(", "docs", ")", ":", "def", "substitute", "(", "match", ")", ":", "ret", "=", "\"</p><div class=\\\"alert alert-{}\\\" role=\\\"alert\\\"><h4>{}</h4>\"", "\"<p>{}</p></div>\"", ".", "format", "(", "NOTE_TYPE", "[", "match", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "]", ",", "match", ".", "group", "(", "1", ")", ".", "capitalize", "(", ")", ",", "match", ".", "group", "(", "2", ")", ")", "if", "len", "(", "match", ".", "groups", "(", ")", ")", ">=", "4", "and", "not", "match", ".", "group", "(", "4", ")", ":", "ret", "+=", "'\\n<p>'", "return", "ret", "for", "regex", "in", "NOTE_RE", ":", "docs", "=", "regex", ".", "sub", "(", "substitute", ",", "docs", ")", "return", "docs" ]
Substitutes the special controls for notes, warnings, todos, and bugs with the corresponding div.
[ "Substitutes", "the", "special", "controls", "for", "notes", "warnings", "todos", "and", "bugs", "with", "the", "corresponding", "div", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/utils.py#L42-L55
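The trigger syntax lives in `NOTE_RE`, which is not shown in this record, but the replacement string is fixed, so reproducing it directly shows the markup one match produces. The `NOTE_TYPE` values here are assumed Bootstrap contextual classes, not taken from the record:

NOTE_TYPE = {'note': 'info', 'warning': 'warning', 'todo': 'success', 'bug': 'danger'}  # assumed mapping
div = ("</p><div class=\"alert alert-{}\" role=\"alert\"><h4>{}</h4>"
       "<p>{}</p></div>").format(NOTE_TYPE['note'], 'Note', 'Remember to recompile.')
print(div)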
237,999
Fortran-FOSS-Programmers/ford
ford/utils.py
paren_split
def paren_split(sep,string):
    """
    Splits the string into pieces divided by sep, when sep is outside of parentheses.
    """
    if len(sep) != 1: raise Exception("Separation string must be one character long")
    retlist = []
    level = 0
    blevel = 0
    left = 0
    for i in range(len(string)):
        if string[i] == "(": level += 1
        elif string[i] == ")": level -= 1
        elif string[i] == "[": blevel += 1
        elif string[i] == "]": blevel -= 1
        elif string[i] == sep and level == 0 and blevel == 0:
            retlist.append(string[left:i])
            left = i+1
    retlist.append(string[left:])
    return retlist
python
def paren_split(sep,string):
    """
    Splits the string into pieces divided by sep, when sep is outside of parentheses.
    """
    if len(sep) != 1: raise Exception("Separation string must be one character long")
    retlist = []
    level = 0
    blevel = 0
    left = 0
    for i in range(len(string)):
        if string[i] == "(": level += 1
        elif string[i] == ")": level -= 1
        elif string[i] == "[": blevel += 1
        elif string[i] == "]": blevel -= 1
        elif string[i] == sep and level == 0 and blevel == 0:
            retlist.append(string[left:i])
            left = i+1
    retlist.append(string[left:])
    return retlist
[ "def", "paren_split", "(", "sep", ",", "string", ")", ":", "if", "len", "(", "sep", ")", "!=", "1", ":", "raise", "Exception", "(", "\"Separation string must be one character long\"", ")", "retlist", "=", "[", "]", "level", "=", "0", "blevel", "=", "0", "left", "=", "0", "for", "i", "in", "range", "(", "len", "(", "string", ")", ")", ":", "if", "string", "[", "i", "]", "==", "\"(\"", ":", "level", "+=", "1", "elif", "string", "[", "i", "]", "==", "\")\"", ":", "level", "-=", "1", "elif", "string", "[", "i", "]", "==", "\"[\"", ":", "blevel", "+=", "1", "elif", "string", "[", "i", "]", "==", "\"]\"", ":", "blevel", "-=", "1", "elif", "string", "[", "i", "]", "==", "sep", "and", "level", "==", "0", "and", "blevel", "==", "0", ":", "retlist", ".", "append", "(", "string", "[", "left", ":", "i", "]", ")", "left", "=", "i", "+", "1", "retlist", ".", "append", "(", "string", "[", "left", ":", "]", ")", "return", "retlist" ]
Splits the string into pieces divided by sep, when sep is outside of parentheses.
[ "Splits", "the", "string", "into", "pieces", "divided", "by", "sep", "when", "sep", "is", "outside", "of", "parentheses", "." ]
d46a44eae20d99205292c31785f936fbed47070f
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/utils.py#L88-L106
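`paren_split` is self-contained, so it can be exercised directly. Note that separators inside `()` or `[]` are ignored and the surrounding whitespace of each piece is preserved; the input below is an invented Fortran attribute list.

print(paren_split(',', 'real(kind=8), dimension(3,3), intent(in)'))
# ['real(kind=8)', ' dimension(3,3)', ' intent(in)']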