text
stringlengths
0
828
""""""Yield the error messages.""""""
for msg in self.messages:
col = getattr(msg, 'col', 0)
yield msg.lineno, col, (msg.tpl % msg.message_args), msg.__class__"
def error(self, line_number, offset, text, check):
    """Run a check through the parent report and record any error.

    Delegates to the superclass to obtain the error code for this
    check; when a code is produced, appends a
    ``(line, column, code, text, check)`` tuple to ``self.errors``,
    converting the 0-based offset to a 1-based column.
    """
    result = super(_Report, self).error(line_number, offset, text, check)
    if not result:
        return
    self.errors.append((line_number, offset + 1, result, text, check))
def prompt(prompt_string, default=None, secret=False, boolean=False, bool_type=None):
    """
    Prompt the user for a string, with an optional default value.

    :param prompt_string: text shown to the user
    :param default: value returned when the user enters nothing
    :param secret: when True, use a hidden (password-style) prompt
    :param boolean: when True, convert the answer to a bool by testing
        whether it starts with 'y' (case-insensitive)
    :param bool_type: key into BOOLEAN_DEFAULTS selecting how the default
        is rendered for yes/no prompts; treated as 'y_n' when None and a
        boolean prompt was requested
    :return: the entered string, the default, or a bool when *boolean*
    """
    if boolean or bool_type in BOOLEAN_DEFAULTS:
        if bool_type is None:
            bool_type = 'y_n'
        # Pick the display template matching whether the default answer
        # is affirmative.
        default_msg = BOOLEAN_DEFAULTS[bool_type][is_affirmative(default)]
    else:
        default_msg = " (default {val}): "
    # NOTE(review): a falsy default ('' / False / None) suppresses the
    # default hint entirely and appends a bare ": " -- confirm intended.
    prompt_string += (default_msg.format(val=default) if default else ": ")
    if secret:
        val = getpass(prompt_string)
    else:
        val = input(prompt_string)
    val = (val if val else default)
    if boolean:
        # Previously this crashed with AttributeError when the user
        # entered nothing and no default was supplied (val is None);
        # treat that case as a negative answer instead.
        # NOTE(review): assumes a non-None val here is a string; a bool
        # default (e.g. default=True) would still raise -- confirm callers
        # only pass string defaults for boolean prompts.
        val = val is not None and val.lower().startswith('y')
    return val
def prop_unc(jc):
    """
    Propagate uncertainty as :math:`J C J^T`.

    :param jc: two-item sequence holding the Jacobian and the covariance
        matrix
    :type jc: sequence
    :return: the propagated covariance matrix

    Both operands are packed into a single argument so this function can
    serve directly as the target of a multiprocessing pool.
    """
    jac, cov = jc
    return jac.dot(cov).dot(jac.T)
def partial_derivative(f, x, n, nargs, delta=DELTA):
    r"""
    Calculate partial derivative using central finite difference
    approximation.

    :param f: function of a single sequence of arguments
    :param x: sequence of arguments, one array of observations per argument
    :param n: index of the argument the derivative is taken with respect to
    :param nargs: total number of arguments
    :param delta: optional relative step size, default is
        :math:`\epsilon^{1/3}` where :math:`\epsilon` is machine precision
    :return: central-difference estimate of the partial derivative w.r.t.
        argument *n*
    """
    dx = np.zeros((nargs, len(x[n])))
    # relative step x[n] * delta; where x[n] is exactly zero fall back to
    # the absolute step `delta` to avoid a zero step size
    dx[n] += np.where(x[n], x[n] * delta, delta)
    # apply central difference approximation; materialize the zip so the
    # result is indexable under Python 3 as well as Python 2 (bare zip()
    # is a lazy iterator in Python 3 and x_dx[0] would raise TypeError)
    x_dx = list(zip(*[xi + (dxi, -dxi) for xi, dxi in zip(x, dx)]))
    return (f(x_dx[0]) - f(x_dx[1])) / dx[n] / 2.0
def jacobian(func, x, nf, nobs, *args, **kwargs):
    r"""
    Estimate Jacobian matrices :math:`\frac{\partial f_i}{\partial x_{j,k}}`
    where :math:`k` are independent observations of :math:`x`.

    The independent variable, :math:`x`, must be a numpy array with exactly
    2 dimensions. The first dimension is the number of independent
    arguments, and the second dimension is the number of observations.

    The function must return a numpy array with exactly 2 dimensions. The
    first is the number of returns and the second dimension corresponds to
    the number of observations. If the input argument is 2-D then the
    output should also be 2-D.

    Constant arguments can be passed as additional positional arguments or
    keyword arguments. If any constant argument increases the number of
    observations of the return value, tile the input arguments to match.
    Use :func:`numpy.atleast_2d` or :func:`numpy.reshape` to get the
    correct dimensions for scalars.

    :param func: function
    :param x: independent variables grouped by observation
    :param nf: number of returns in output (1st dimension)
    :param nobs: number of observations in output (2nd dimension)
    :return: Jacobian matrices for each observation
    """
    nargs = len(x)  # degrees of freedom

    def f(x_):
        # freeze the constant positional and keyword arguments
        return func(x_, *args, **kwargs)

    j = np.zeros((nargs, nf, nobs))  # one slab of partials per argument
    # `range` instead of `xrange` so the loop also runs under Python 3;
    # iteration semantics are identical under Python 2
    for n in range(nargs):
        j[n] = partial_derivative(f, x, n, nargs)
    # better to transpose J once than transpose each partial derivative
    # as it is computed (j[:, :, n] = df.T)
    return j.T
1674,"def jflatten(j):
""""""
Flatten 3_D Jacobian into 2-D.
""""""
nobs, nf, nargs = j.shape