| source_code (string, 45–7.24k chars) | function_name (string, 1–111 chars) | messages (list) |
|---|---|---|
def x(knots, order):
if isinstance(knots, np.ndarray) and knots.ndim > 1:
raise ValueError('knots must be a list or a rank-1 array')
knots = list(knots)
return np.array([knots[0]] * order + knots + [knots[-1]] * order)
|
augknt
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(knots, order):\n if isinstance(knots, np.ndarray) and knots.ndim > 1:\n raise ValueError('knots must be a list or a rank-1 array')\n knots = list(knots)\n return np.array([knots[0]] * order + knots + [knots[-1]] * order)\n",
"role": "user"
},
{
"content": "augknt",
"role": "assistant"
}
] |
def x(t, k):
t = np.atleast_1d(t)
if t.ndim > 1:
raise ValueError('t must be a list or a rank-1 array')
n = t.shape[0]
u = max(0, n - (k - 1))
out = np.empty((u,), dtype=t.dtype)
for j in range(u):
out[j] = sum(t[j:j + k]) / k
return out
|
aveknt
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(t, k):\n t = np.atleast_1d(t)\n if t.ndim > 1:\n raise ValueError('t must be a list or a rank-1 array')\n n = t.shape[0]\n u = max(0, n - (k - 1))\n out = np.empty((u,), dtype=t.dtype)\n for j in range(u):\n out[j] = sum(t[j:j + k]) / k\n return out\n",
"role": "user"
},
{
"content": "aveknt",
"role": "assistant"
}
] |
def x(tau, order):
tau = np.atleast_1d(tau)
k = order + 1
if tau.ndim > 1:
raise ValueError('tau must be a list or a rank-1 array')
if len(tau) < k:
k = len(tau)
if not (tau == sorted(tau)).all():
raise ValueError('tau must be nondecreasing')
u = len(tau) - k
for i in range(u):
if tau[i + k - 1] == tau[i]:
raise ValueError(
'k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d'
% (i, k))
prefix = [tau[0]] * k
suffix = [tau[-1]] * k
tmp = aveknt(tau[1:-1], k - 1)
middle = tmp.tolist()
return np.array(prefix + middle + suffix, dtype=tmp.dtype)
|
aptknt
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tau, order):\n tau = np.atleast_1d(tau)\n k = order + 1\n if tau.ndim > 1:\n raise ValueError('tau must be a list or a rank-1 array')\n if len(tau) < k:\n k = len(tau)\n if not (tau == sorted(tau)).all():\n raise ValueError('tau must be nondecreasing')\n u = len(tau) - k\n for i in range(u):\n if tau[i + k - 1] == tau[i]:\n raise ValueError(\n 'k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d'\n % (i, k))\n prefix = [tau[0]] * k\n suffix = [tau[-1]] * k\n tmp = aveknt(tau[1:-1], k - 1)\n middle = tmp.tolist()\n return np.array(prefix + middle + suffix, dtype=tmp.dtype)\n",
"role": "user"
},
{
"content": "aptknt",
"role": "assistant"
}
] |
def x(t):
t = np.atleast_1d(t)
if t.ndim > 1:
raise ValueError('t must be a list or a rank-1 array')
out = []
e = None
for k in range(t.shape[0]):
if t[k] != e:
e = t[k]
count = 0
else:
count += 1
out.append(count)
return np.array(out)
|
knt2mlt
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(t):\n t = np.atleast_1d(t)\n if t.ndim > 1:\n raise ValueError('t must be a list or a rank-1 array')\n out = []\n e = None\n for k in range(t.shape[0]):\n if t[k] != e:\n e = t[k]\n count = 0\n else:\n count += 1\n out.append(count)\n return np.array(out)\n",
"role": "user"
},
{
"content": "knt2mlt",
"role": "assistant"
}
] |
def x(knots, order, tau):
m = knt2mlt(tau)
B = bspline.Bspline(knots, order)
dummy = B(0.0)
nbasis = len(dummy)
A = np.empty((tau.shape[0], nbasis), dtype=dummy.dtype)
for i, item in enumerate(zip(tau, m)):
taui, mi = item
f = B.diff(order=mi)
A[i, :] = f(taui)
return A
|
spcol
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(knots, order, tau):\n m = knt2mlt(tau)\n B = bspline.Bspline(knots, order)\n dummy = B(0.0)\n nbasis = len(dummy)\n A = np.empty((tau.shape[0], nbasis), dtype=dummy.dtype)\n for i, item in enumerate(zip(tau, m)):\n taui, mi = item\n f = B.diff(order=mi)\n A[i, :] = f(taui)\n return A\n",
"role": "user"
},
{
"content": "spcol",
"role": "assistant"
}
] |
def x(self, xi):
return np.where(np.all([self.knot_vector[:-1] <= xi, xi < self.
knot_vector[1:]], axis=0), 1.0, 0.0)
|
basis0
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, xi):\n return np.where(np.all([self.knot_vector[:-1] <= xi, xi < self.\n knot_vector[1:]], axis=0), 1.0, 0.0)\n",
"role": "user"
},
{
"content": "basis0",
"role": "assistant"
}
] |
def x(self, xi, p, compute_derivatives=False):
if p == 0:
return self.__basis0(xi)
else:
basis_p_minus_1 = self.__basis(xi, p - 1)
first_term_numerator = xi - self.knot_vector[:-p]
first_term_denominator = self.knot_vector[p:] - self.knot_vector[:-p]
second_term_numerator = self.knot_vector[p + 1:] - xi
second_term_denominator = self.knot_vector[p + 1:] - self.knot_vector[1:-p]
if compute_derivatives and p == self.p:
first_term_numerator = p
second_term_numerator = -p
with np.errstate(divide='ignore', invalid='ignore'):
first_term = np.where(first_term_denominator != 0.0,
first_term_numerator / first_term_denominator, 0.0)
second_term = np.where(second_term_denominator != 0.0,
second_term_numerator / second_term_denominator, 0.0)
return first_term[:-1] * basis_p_minus_1[:-1
] + second_term * basis_p_minus_1[1:]
|
basis
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, xi, p, compute_derivatives=False):\n if p == 0:\n return self.__basis0(xi)\n else:\n basis_p_minus_1 = self.__basis(xi, p - 1)\n first_term_numerator = xi - self.knot_vector[:-p]\n first_term_denominator = self.knot_vector[p:] - self.knot_vector[:-p]\n second_term_numerator = self.knot_vector[p + 1:] - xi\n second_term_denominator = self.knot_vector[p + 1:] - self.knot_vector[1:-p]\n if compute_derivatives and p == self.p:\n first_term_numerator = p\n second_term_numerator = -p\n with np.errstate(divide='ignore', invalid='ignore'):\n first_term = np.where(first_term_denominator != 0.0, \n first_term_numerator / first_term_denominator, 0.0)\n second_term = np.where(second_term_denominator != 0.0, \n second_term_numerator / second_term_denominator, 0.0)\n return first_term[:-1] * basis_p_minus_1[:-1\n ] + second_term * basis_p_minus_1[1:]\n",
"role": "user"
},
{
"content": "basis",
"role": "assistant"
}
] |
def x(self, xi):
return self.__basis(xi, self.p, compute_derivatives=True)
|
d
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, xi):\n return self.__basis(xi, self.p, compute_derivatives=True)\n",
"role": "user"
},
{
"content": "d",
"role": "assistant"
}
] |
def x(self):
try:
import matplotlib.pyplot as plt
except ImportError:
from sys import stderr
print(
'ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function'
, file=stderr)
raise
x_min = np.min(self.knot_vector)
x_max = np.max(self.knot_vector)
x = np.linspace(x_min, x_max, num=1000)
N = np.array([self(i) for i in x]).T
for n in N:
plt.plot(x, n)
return plt.show()
|
plot
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\n 'ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function'\n , file=stderr)\n raise\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n x = np.linspace(x_min, x_max, num=1000)\n N = np.array([self(i) for i in x]).T\n for n in N:\n plt.plot(x, n)\n return plt.show()\n",
"role": "user"
},
{
"content": "plot",
"role": "assistant"
}
] |
def x(self):
assert self.p > 0, 'order of Bspline must be > 0'
t = self.knot_vector
p = self.p
Bi = Bspline(t[:-1], p - 1)
Bip1 = Bspline(t[1:], p - 1)
numer1 = +p
numer2 = -p
denom1 = t[p:-1] - t[:-(p + 1)]
denom2 = t[p + 1:] - t[1:-p]
with np.errstate(divide='ignore', invalid='ignore'):
ci = np.where(denom1 != 0.0, numer1 / denom1, 0.0)
cip1 = np.where(denom2 != 0.0, numer2 / denom2, 0.0)
return (ci, Bi), (cip1, Bip1)
|
diff_internal
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n assert self.p > 0, 'order of Bspline must be > 0'\n t = self.knot_vector\n p = self.p\n Bi = Bspline(t[:-1], p - 1)\n Bip1 = Bspline(t[1:], p - 1)\n numer1 = +p\n numer2 = -p\n denom1 = t[p:-1] - t[:-(p + 1)]\n denom2 = t[p + 1:] - t[1:-p]\n with np.errstate(divide='ignore', invalid='ignore'):\n ci = np.where(denom1 != 0.0, numer1 / denom1, 0.0)\n cip1 = np.where(denom2 != 0.0, numer2 / denom2, 0.0)\n return (ci, Bi), (cip1, Bip1)\n",
"role": "user"
},
{
"content": "diff_internal",
"role": "assistant"
}
] |
def x(self, order=1):
order = int(order)
if order < 0:
raise ValueError('order must be >= 0, got %d' % order)
if order == 0:
return self.__call__
if order > self.p:
dummy = self.__call__(0.0)
nbasis = dummy.shape[0]
return lambda x: np.zeros((nbasis,), dtype=dummy.dtype)
terms = [(1.0, self)]
for k in range(order):
tmp = []
for Ci, Bi in terms:
tmp.extend((Ci * cn, Bn) for cn, Bn in Bi.__diff_internal())
terms = tmp
return lambda x: sum(ci * Bi(x) for ci, Bi in terms)
|
diff
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, order=1):\n order = int(order)\n if order < 0:\n raise ValueError('order must be >= 0, got %d' % order)\n if order == 0:\n return self.__call__\n if order > self.p:\n dummy = self.__call__(0.0)\n nbasis = dummy.shape[0]\n return lambda x: np.zeros((nbasis,), dtype=dummy.dtype)\n terms = [(1.0, self)]\n for k in range(order):\n tmp = []\n for Ci, Bi in terms:\n tmp.extend((Ci * cn, Bn) for cn, Bn in Bi.__diff_internal())\n terms = tmp\n return lambda x: sum(ci * Bi(x) for ci, Bi in terms)\n",
"role": "user"
},
{
"content": "diff",
"role": "assistant"
}
] |
def x(self, tau, deriv_order=0):
dummy = self.__call__(0.0)
nbasis = dummy.shape[0]
tau = np.atleast_1d(tau)
if tau.ndim > 1:
raise ValueError('tau must be a list or a rank-1 array')
A = np.empty((tau.shape[0], nbasis), dtype=dummy.dtype)
f = self.diff(order=deriv_order)
for i, taui in enumerate(tau):
A[i, :] = f(taui)
return np.squeeze(A)
|
collmat
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tau, deriv_order=0):\n dummy = self.__call__(0.0)\n nbasis = dummy.shape[0]\n tau = np.atleast_1d(tau)\n if tau.ndim > 1:\n raise ValueError('tau must be a list or a rank-1 array')\n A = np.empty((tau.shape[0], nbasis), dtype=dummy.dtype)\n f = self.diff(order=deriv_order)\n for i, taui in enumerate(tau):\n A[i, :] = f(taui)\n return np.squeeze(A)\n",
"role": "user"
},
{
"content": "collmat",
"role": "assistant"
}
] |
def x(interfaces):
def x(block, parts):
try:
method = block['method'].lower()
except KeyError:
for k, v in compat.iteritems(block):
traverse(v, parts + [k])
else:
path = '.'.join(parts)
try:
methods[method]
except KeyError:
methods[method] = {}
methods[method][path] = block
methods = {}
for key, val in compat.iteritems(interfaces):
traverse(val, [key])
return methods
|
build_interfaces_by_method
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(interfaces):\n\n def x(block, parts):\n try:\n method = block['method'].lower()\n except KeyError:\n for k, v in compat.iteritems(block):\n traverse(v, parts + [k])\n else:\n path = '.'.join(parts)\n try:\n methods[method]\n except KeyError:\n methods[method] = {}\n methods[method][path] = block\n methods = {}\n for key, val in compat.iteritems(interfaces):\n traverse(val, [key])\n return methods\n",
"role": "user"
},
{
"content": "build_interfaces_by_method",
"role": "assistant"
}
] |
def x(method, url, nonce, params, ext='', body_hash=None):
urlparts = urlparse.urlparse(url)
if urlparts.query:
norm_url = '%s?%s' % (urlparts.path, urlparts.query)
elif params:
norm_url = '%s?%s' % (urlparts.path, get_normalized_params(params))
else:
norm_url = urlparts.path
if not body_hash:
body_hash = get_body_hash(params)
port = urlparts.port
if not port:
assert urlparts.scheme in ('http', 'https')
if urlparts.scheme == 'http':
port = 80
elif urlparts.scheme == 'https':
port = 443
output = [nonce, method.upper(), norm_url, urlparts.hostname, port,
body_hash, ext, '']
return '\n'.join(map(str, output))
|
get_normalized_request_string
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(method, url, nonce, params, ext='', body_hash=None):\n urlparts = urlparse.urlparse(url)\n if urlparts.query:\n norm_url = '%s?%s' % (urlparts.path, urlparts.query)\n elif params:\n norm_url = '%s?%s' % (urlparts.path, get_normalized_params(params))\n else:\n norm_url = urlparts.path\n if not body_hash:\n body_hash = get_body_hash(params)\n port = urlparts.port\n if not port:\n assert urlparts.scheme in ('http', 'https')\n if urlparts.scheme == 'http':\n port = 80\n elif urlparts.scheme == 'https':\n port = 443\n output = [nonce, method.upper(), norm_url, urlparts.hostname, port,\n body_hash, ext, '']\n return '\\n'.join(map(str, output))\n",
"role": "user"
},
{
"content": "get_normalized_request_string",
"role": "assistant"
}
] |
def x(params):
norm_params = get_normalized_params(params)
return binascii.b2a_base64(hashlib.sha1(norm_params).digest())[:-1]
|
get_body_hash
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(params):\n norm_params = get_normalized_params(params)\n return binascii.b2a_base64(hashlib.sha1(norm_params).digest())[:-1]\n",
"role": "user"
},
{
"content": "get_body_hash",
"role": "assistant"
}
] |
def x(api_secret, norm_request_string):
hashed = hmac.new(str(api_secret), norm_request_string, hashlib.sha1)
return binascii.b2a_base64(hashed.digest())[:-1]
|
get_mac_signature
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(api_secret, norm_request_string):\n hashed = hmac.new(str(api_secret), norm_request_string, hashlib.sha1)\n return binascii.b2a_base64(hashed.digest())[:-1]\n",
"role": "user"
},
{
"content": "get_mac_signature",
"role": "assistant"
}
] |
def x(self, access_mode='r'):
try:
with open(self, access_mode, encoding='utf-8') as file:
return file.read()
except IOError:
print(self + ' File not found.')
sys.exit(0)
|
open_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, access_mode='r'):\n try:\n with open(self, access_mode, encoding='utf-8') as file:\n return file.read()\n except IOError:\n print(self + ' File not found.')\n sys.exit(0)\n",
"role": "user"
},
{
"content": "open_file",
"role": "assistant"
}
] |
def x(self, itraj):
A = self._hmm.transition_matrix
pi = self._hmm.initial_distribution
obs = self._observations[itraj]
T = len(obs)
self._hmm.output_model.p_obs(obs, out=self._pobs)
logprob = hidden.forward(A, self._pobs, pi, T=T, alpha_out=self._alpha)[0]
hidden.backward(A, self._pobs, T=T, beta_out=self._beta)
hidden.state_probabilities(self._alpha, self._beta, T=T, gamma_out=self
._gammas[itraj])
hidden.transition_counts(self._alpha, self._beta, A, self._pobs, T=T,
out=self._Cs[itraj])
return logprob
|
forward_backward
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, itraj):\n A = self._hmm.transition_matrix\n pi = self._hmm.initial_distribution\n obs = self._observations[itraj]\n T = len(obs)\n self._hmm.output_model.p_obs(obs, out=self._pobs)\n logprob = hidden.forward(A, self._pobs, pi, T=T, alpha_out=self._alpha)[0]\n hidden.backward(A, self._pobs, T=T, beta_out=self._beta)\n hidden.state_probabilities(self._alpha, self._beta, T=T, gamma_out=self\n ._gammas[itraj])\n hidden.transition_counts(self._alpha, self._beta, A, self._pobs, T=T,\n out=self._Cs[itraj])\n return logprob\n",
"role": "user"
},
{
"content": "forward_backward",
"role": "assistant"
}
] |
def x(self, gammas, count_matrices, maxiter=10000000):
gamma0_sum = self._init_counts(gammas)
C = self._transition_counts(count_matrices)
logger().info('Initial count = \n' + str(gamma0_sum))
logger().info('Count matrix = \n' + str(C))
from bhmm.estimators._tmatrix_disconnected import estimate_P, stationary_distribution
T = estimate_P(C, reversible=self._hmm.is_reversible, fixed_statdist=
self._fixed_stationary_distribution, maxiter=maxiter, maxerr=1e-12,
mincount_connectivity=1e-16)
if self._stationary:
if self._fixed_stationary_distribution is None:
pi = stationary_distribution(T, C=C, mincount_connectivity=1e-16)
else:
pi = self._fixed_stationary_distribution
elif self._fixed_initial_distribution is None:
pi = gamma0_sum / np.sum(gamma0_sum)
else:
pi = self._fixed_initial_distribution
self._hmm.update(pi, T)
logger().info('T: \n' + str(T))
logger().info('pi: \n' + str(pi))
self._hmm.output_model.estimate(self._observations, gammas)
|
update_model
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, gammas, count_matrices, maxiter=10000000):\n gamma0_sum = self._init_counts(gammas)\n C = self._transition_counts(count_matrices)\n logger().info('Initial count = \\n' + str(gamma0_sum))\n logger().info('Count matrix = \\n' + str(C))\n from bhmm.estimators._tmatrix_disconnected import estimate_P, stationary_distribution\n T = estimate_P(C, reversible=self._hmm.is_reversible, fixed_statdist=\n self._fixed_stationary_distribution, maxiter=maxiter, maxerr=1e-12,\n mincount_connectivity=1e-16)\n if self._stationary:\n if self._fixed_stationary_distribution is None:\n pi = stationary_distribution(T, C=C, mincount_connectivity=1e-16)\n else:\n pi = self._fixed_stationary_distribution\n elif self._fixed_initial_distribution is None:\n pi = gamma0_sum / np.sum(gamma0_sum)\n else:\n pi = self._fixed_initial_distribution\n self._hmm.update(pi, T)\n logger().info('T: \\n' + str(T))\n logger().info('pi: \\n' + str(pi))\n self._hmm.output_model.estimate(self._observations, gammas)\n",
"role": "user"
},
{
"content": "update_model",
"role": "assistant"
}
] |
def x(self):
K = len(self._observations)
A = self._hmm.transition_matrix
pi = self._hmm.initial_distribution
paths = np.empty(K, dtype=object)
for itraj in range(K):
obs = self._observations[itraj]
pobs = self._hmm.output_model.p_obs(obs)
paths[itraj] = hidden.viterbi(A, pobs, pi)
return paths
|
compute_viterbi_paths
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n K = len(self._observations)\n A = self._hmm.transition_matrix\n pi = self._hmm.initial_distribution\n paths = np.empty(K, dtype=object)\n for itraj in range(K):\n obs = self._observations[itraj]\n pobs = self._hmm.output_model.p_obs(obs)\n paths[itraj] = hidden.viterbi(A, pobs, pi)\n return paths\n",
"role": "user"
},
{
"content": "compute_viterbi_paths",
"role": "assistant"
}
] |
def x(self):
logger().info(
'=================================================================')
logger().info('Running Baum-Welch:')
logger().info(' input observations: ' + str(self.nobservations) +
' of lengths ' + str(self.observation_lengths))
logger().info(' initial HMM guess:' + str(self._hmm))
initial_time = time.time()
it = 0
self._likelihoods = np.zeros(self.maxit)
loglik = 0.0
tmatrix_nonzeros = self.hmm.transition_matrix.nonzero()
converged = False
while not converged and it < self.maxit:
t1 = time.time()
loglik = 0.0
for k in range(self._nobs):
loglik += self._forward_backward(k)
assert np.isfinite(loglik), it
t2 = time.time()
if it > 0:
dL = loglik - self._likelihoods[it - 1]
if dL < self._accuracy:
converged = True
self._update_model(self._gammas, self._Cs, maxiter=self._maxit_P)
t3 = time.time()
tmatrix_nonzeros_new = self.hmm.transition_matrix.nonzero()
if not np.array_equal(tmatrix_nonzeros, tmatrix_nonzeros_new):
converged = False
tmatrix_nonzeros = tmatrix_nonzeros_new
logger().info(str(it) + ' ll = ' + str(loglik))
self._likelihoods[it] = loglik
it += 1
self._likelihoods = self._likelihoods[:it]
self._hmm.likelihood = loglik
self.count_matrix = self._transition_counts(self._Cs)
self.initial_count = self._init_counts(self._gammas)
final_time = time.time()
elapsed_time = final_time - initial_time
logger().info('maximum likelihood HMM:' + str(self._hmm))
logger().info('Elapsed time for Baum-Welch solution: %.3f s' % elapsed_time
)
logger().info('Computing Viterbi path:')
initial_time = time.time()
self._hmm.hidden_state_trajectories = self.compute_viterbi_paths()
final_time = time.time()
elapsed_time = final_time - initial_time
logger().info('Elapsed time for Viterbi path computation: %.3f s' %
elapsed_time)
logger().info(
'=================================================================')
return self._hmm
|
fit
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n logger().info(\n '=================================================================')\n logger().info('Running Baum-Welch:')\n logger().info(' input observations: ' + str(self.nobservations) +\n ' of lengths ' + str(self.observation_lengths))\n logger().info(' initial HMM guess:' + str(self._hmm))\n initial_time = time.time()\n it = 0\n self._likelihoods = np.zeros(self.maxit)\n loglik = 0.0\n tmatrix_nonzeros = self.hmm.transition_matrix.nonzero()\n converged = False\n while not converged and it < self.maxit:\n t1 = time.time()\n loglik = 0.0\n for k in range(self._nobs):\n loglik += self._forward_backward(k)\n assert np.isfinite(loglik), it\n t2 = time.time()\n if it > 0:\n dL = loglik - self._likelihoods[it - 1]\n if dL < self._accuracy:\n converged = True\n self._update_model(self._gammas, self._Cs, maxiter=self._maxit_P)\n t3 = time.time()\n tmatrix_nonzeros_new = self.hmm.transition_matrix.nonzero()\n if not np.array_equal(tmatrix_nonzeros, tmatrix_nonzeros_new):\n converged = False\n tmatrix_nonzeros = tmatrix_nonzeros_new\n logger().info(str(it) + ' ll = ' + str(loglik))\n self._likelihoods[it] = loglik\n it += 1\n self._likelihoods = self._likelihoods[:it]\n self._hmm.likelihood = loglik\n self.count_matrix = self._transition_counts(self._Cs)\n self.initial_count = self._init_counts(self._gammas)\n final_time = time.time()\n elapsed_time = final_time - initial_time\n logger().info('maximum likelihood HMM:' + str(self._hmm))\n logger().info('Elapsed time for Baum-Welch solution: %.3f s' % elapsed_time\n )\n logger().info('Computing Viterbi path:')\n initial_time = time.time()\n self._hmm.hidden_state_trajectories = self.compute_viterbi_paths()\n final_time = time.time()\n elapsed_time = final_time - initial_time\n logger().info('Elapsed time for Viterbi path computation: %.3f s' %\n elapsed_time)\n logger().info(\n '=================================================================')\n return self._hmm\n",
"role": "user"
},
{
"content": "fit",
"role": "assistant"
}
] |
def x(mean, covar, covariance_type='diag', n_samples=1, random_state=None):
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = n_dim,
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s)
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
|
sample_gaussian
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(mean, covar, covariance_type='diag', n_samples=1, random_state=None):\n rng = check_random_state(random_state)\n n_dim = len(mean)\n rand = rng.randn(n_dim, n_samples)\n if n_samples == 1:\n rand.shape = n_dim,\n if covariance_type == 'spherical':\n rand *= np.sqrt(covar)\n elif covariance_type == 'diag':\n rand = np.dot(np.diag(np.sqrt(covar)), rand)\n else:\n s, U = linalg.eigh(covar)\n s.clip(0, out=s)\n np.sqrt(s, out=s)\n U *= s\n rand = np.dot(U, rand)\n return (rand.T + mean).T\n",
"role": "user"
},
{
"content": "sample_gaussian",
"role": "assistant"
}
] |
def x(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
|
covar_mstep_diag
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):\n avg_X2 = np.dot(responsibilities.T, X * X) * norm\n avg_means2 = gmm.means_ ** 2\n avg_X_means = gmm.means_ * weighted_X_sum * norm\n return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar\n",
"role": "user"
},
{
"content": "covar_mstep_diag",
"role": "assistant"
}
] |
def x(*args):
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
|
covar_mstep_spherical
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*args):\n cv = _covar_mstep_diag(*args)\n return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))\n",
"role": "user"
},
{
"content": "covar_mstep_spherical",
"role": "assistant"
}
] |
def x(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
|
covar_mstep_full
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):\n n_features = X.shape[1]\n cv = np.empty((gmm.n_components, n_features, n_features))\n for c in range(gmm.n_components):\n post = responsibilities[:, c]\n mu = gmm.means_[c]\n diff = X - mu\n with np.errstate(under='ignore'):\n avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)\n cv[c] = avg_cv + min_covar * np.eye(n_features)\n return cv\n",
"role": "user"
},
{
"content": "covar_mstep_full",
"role": "assistant"
}
] |
def x(self):
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
|
get_covars
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.covariance_type == 'full':\n return self.covars_\n elif self.covariance_type == 'diag':\n return [np.diag(cov) for cov in self.covars_]\n elif self.covariance_type == 'tied':\n return [self.covars_] * self.n_components\n elif self.covariance_type == 'spherical':\n return [np.diag(cov) for cov in self.covars_]\n",
"role": "user"
},
{
"content": "get_covars",
"role": "assistant"
}
] |
def x(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
|
set_covars
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, covars):\n covars = np.asarray(covars)\n _validate_covars(covars, self.covariance_type, self.n_components)\n self.covars_ = covars\n",
"role": "user"
},
{
"content": "set_covars",
"role": "assistant"
}
] |
def x(self, X):
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) + np.log(self.weights_)
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
|
score_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X):\n check_is_fitted(self, 'means_')\n X = check_array(X)\n if X.ndim == 1:\n X = X[:, np.newaxis]\n if X.size == 0:\n return np.array([]), np.empty((0, self.n_components))\n if X.shape[1] != self.means_.shape[1]:\n raise ValueError('The shape of X is not compatible with self')\n lpr = log_multivariate_normal_density(X, self.means_, self.covars_,\n self.covariance_type) + np.log(self.weights_)\n logprob = logsumexp(lpr, axis=1)\n responsibilities = np.exp(lpr - logprob[:, np.newaxis])\n return logprob, responsibilities\n",
"role": "user"
},
{
"content": "score_samples",
"role": "assistant"
}
] |
def x(self, X, y=None):
logprob, _ = self.score_samples(X)
return logprob
|
score
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, y=None):\n logprob, _ = self.score_samples(X)\n return logprob\n",
"role": "user"
},
{
"content": "score",
"role": "assistant"
}
] |
def x(self, X):
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
|
predict
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X):\n logprob, responsibilities = self.score_samples(X)\n return responsibilities.argmax(axis=1)\n",
"role": "user"
},
{
"content": "predict",
"role": "assistant"
}
] |
def x(self, n_samples=1, random_state=None):
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
comps = weight_cdf.searchsorted(rand)
for comp in range(self.n_components):
comp_in_X = comp == comps
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(self.means_[comp], cv, self.
covariance_type, num_comp_in_X, random_state=random_state).T
return X
|
sample
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, n_samples=1, random_state=None):\n check_is_fitted(self, 'means_')\n if random_state is None:\n random_state = self.random_state\n random_state = check_random_state(random_state)\n weight_cdf = np.cumsum(self.weights_)\n X = np.empty((n_samples, self.means_.shape[1]))\n rand = random_state.rand(n_samples)\n comps = weight_cdf.searchsorted(rand)\n for comp in range(self.n_components):\n comp_in_X = comp == comps\n num_comp_in_X = comp_in_X.sum()\n if num_comp_in_X > 0:\n if self.covariance_type == 'tied':\n cv = self.covars_\n elif self.covariance_type == 'spherical':\n cv = self.covars_[comp][0]\n else:\n cv = self.covars_[comp]\n X[comp_in_X] = sample_gaussian(self.means_[comp], cv, self.\n covariance_type, num_comp_in_X, random_state=random_state).T\n return X\n",
"role": "user"
},
{
"content": "sample",
"role": "assistant"
}
] |
def x(self, X, y=None):
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
if np.issubdtype(X.dtype, np.float32):
from bhmm._external.clustering.kmeans_clustering_32 import init_centers
elif np.issubdtype(X.dtype, np.float64):
from bhmm._external.clustering.kmeans_clustering_64 import init_centers
else:
raise ValueError(
'Could not handle dtype %s for clustering!' % X.dtype)
centers = init_centers(X, 'euclidean', self.n_components)
self.means_ = centers
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = 1, 1
self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv,
self.covariance_type, self.n_components)
current_log_likelihood = None
self.converged_ = False
tol = self.tol if self.thresh is None else self.thresh / float(X.
shape[0])
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < tol:
self.converged_ = True
break
self._do_mstep(X, responsibilities, self.params, self.min_covar)
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_, 'means': self.
means_, 'covars': self.covars_}
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
'EM algorithm was never able to compute a valid likelihood ' +
'given initial parameters. Try different init parameters ' +
'(or increasing n_init) or check for degenerate data.')
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
|
fit
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, y=None):\n X = check_array(X, dtype=np.float64)\n if X.shape[0] < self.n_components:\n raise ValueError(\n 'GMM estimation with %s components, but got only %s samples' %\n (self.n_components, X.shape[0]))\n max_log_prob = -np.infty\n for _ in range(self.n_init):\n if 'm' in self.init_params or not hasattr(self, 'means_'):\n if np.issubdtype(X.dtype, np.float32):\n from bhmm._external.clustering.kmeans_clustering_32 import init_centers\n elif np.issubdtype(X.dtype, np.float64):\n from bhmm._external.clustering.kmeans_clustering_64 import init_centers\n else:\n raise ValueError(\n 'Could not handle dtype %s for clustering!' % X.dtype)\n centers = init_centers(X, 'euclidean', self.n_components)\n self.means_ = centers\n if 'w' in self.init_params or not hasattr(self, 'weights_'):\n self.weights_ = np.tile(1.0 / self.n_components, self.n_components)\n if 'c' in self.init_params or not hasattr(self, 'covars_'):\n cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])\n if not cv.shape:\n cv.shape = 1, 1\n self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv,\n self.covariance_type, self.n_components)\n current_log_likelihood = None\n self.converged_ = False\n tol = self.tol if self.thresh is None else self.thresh / float(X.\n shape[0])\n for i in range(self.n_iter):\n prev_log_likelihood = current_log_likelihood\n log_likelihoods, responsibilities = self.score_samples(X)\n current_log_likelihood = log_likelihoods.mean()\n if prev_log_likelihood is not None:\n change = abs(current_log_likelihood - prev_log_likelihood)\n if change < tol:\n self.converged_ = True\n break\n self._do_mstep(X, responsibilities, self.params, self.min_covar)\n if self.n_iter:\n if current_log_likelihood > max_log_prob:\n max_log_prob = current_log_likelihood\n best_params = {'weights': self.weights_, 'means': self.\n means_, 'covars': self.covars_}\n if np.isneginf(max_log_prob) and self.n_iter:\n raise RuntimeError(\n 'EM algorithm was never able to compute a valid likelihood ' +\n 'given initial parameters. Try different init parameters ' +\n '(or increasing n_init) or check for degenerate data.')\n if self.n_iter:\n self.covars_ = best_params['covars']\n self.means_ = best_params['means']\n self.weights_ = best_params['weights']\n return self\n",
"role": "user"
},
{
"content": "fit",
"role": "assistant"
}
] |
def x(self, X, responsibilities, params, min_covar=0):
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = weights / (weights.sum() + 10 * EPS) + EPS
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(self, X, responsibilities,
weighted_X_sum, inverse_weights, min_covar)
return weights
|
do_mstep
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, responsibilities, params, min_covar=0):\n weights = responsibilities.sum(axis=0)\n weighted_X_sum = np.dot(responsibilities.T, X)\n inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)\n if 'w' in params:\n self.weights_ = weights / (weights.sum() + 10 * EPS) + EPS\n if 'm' in params:\n self.means_ = weighted_X_sum * inverse_weights\n if 'c' in params:\n covar_mstep_func = _covar_mstep_funcs[self.covariance_type]\n self.covars_ = covar_mstep_func(self, X, responsibilities,\n weighted_X_sum, inverse_weights, min_covar)\n return weights\n",
"role": "user"
},
{
"content": "do_mstep",
"role": "assistant"
}
] |
def x(self):
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.0
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.0
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
|
n_parameters
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n ndim = self.means_.shape[1]\n if self.covariance_type == 'full':\n cov_params = self.n_components * ndim * (ndim + 1) / 2.0\n elif self.covariance_type == 'diag':\n cov_params = self.n_components * ndim\n elif self.covariance_type == 'tied':\n cov_params = ndim * (ndim + 1) / 2.0\n elif self.covariance_type == 'spherical':\n cov_params = self.n_components\n mean_params = ndim * self.n_components\n return int(cov_params + mean_params + self.n_components - 1)\n",
"role": "user"
},
{
"content": "n_parameters",
"role": "assistant"
}
] |
def x(self, X):
return -2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0])
|
bic
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X):\n return -2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0])\n",
"role": "user"
},
{
"content": "bic",
"role": "assistant"
}
] |
def x(cls):
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
return []
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError(
"scikit-learn estimators should always specify their parameters in the signature of their __init__ (no varargs). %s doesn't follow this convention."
% (cls,))
args.pop(0)
args.sort()
return args
|
get_param_names
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cls):\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n return []\n args, varargs, kw, default = inspect.getargspec(init)\n if varargs is not None:\n raise RuntimeError(\n \"scikit-learn estimators should always specify their parameters in the signature of their __init__ (no varargs). %s doesn't follow this convention.\"\n % (cls,))\n args.pop(0)\n args.sort()\n return args\n",
"role": "user"
},
{
"content": "get_param_names",
"role": "assistant"
}
] |
def x(self, **params):
if not params:
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
name, sub_name = split
if not name in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
if not key in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
|
set_params
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **params):\n if not params:\n return self\n valid_params = self.get_params(deep=True)\n for key, value in six.iteritems(params):\n split = key.split('__', 1)\n if len(split) > 1:\n name, sub_name = split\n if not name in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s' %\n (name, self))\n sub_object = valid_params[name]\n sub_object.set_params(**{sub_name: value})\n else:\n if not key in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s' %\n (key, self.__class__.__name__))\n setattr(self, key, value)\n return self\n",
"role": "user"
},
{
"content": "set_params",
"role": "assistant"
}
] |
def x(observations, nstates, reversible=True):
ntrajectories = len(observations)
collected_observations = np.array([], dtype=config.dtype)
for o_t in observations:
collected_observations = np.append(collected_observations, o_t)
from bhmm._external.sklearn import mixture
gmm = mixture.GMM(n_components=nstates)
gmm.fit(collected_observations[:, None])
from bhmm import GaussianOutputModel
output_model = GaussianOutputModel(nstates, means=gmm.means_[:, 0],
sigmas=np.sqrt(gmm.covars_[:, 0]))
logger().info('Gaussian output model:\n' + str(output_model))
Pi = np.zeros([nstates], np.float64)
Pi[:] = gmm.weights_[:]
logger().info('GMM weights: %s' % str(gmm.weights_))
Nij = np.zeros([nstates, nstates], np.float64)
for o_t in observations:
T = o_t.shape[0]
pobs = output_model.p_obs(o_t)
pobs /= pobs.sum(axis=1)[:, None]
for t in range(T - 1):
Nij[:, :] = Nij[:, :] + np.outer(pobs[t, :], pobs[t + 1, :])
logger().info('Nij\n' + str(Nij))
import msmtools.estimation as msmest
import msmtools.analysis as msmana
Tij = msmest.transition_matrix(Nij, reversible=reversible)
pi = msmana.stationary_distribution(Tij)
model = HMM(pi, Tij, output_model)
return model
|
init_model_gaussian1d
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(observations, nstates, reversible=True):\n ntrajectories = len(observations)\n collected_observations = np.array([], dtype=config.dtype)\n for o_t in observations:\n collected_observations = np.append(collected_observations, o_t)\n from bhmm._external.sklearn import mixture\n gmm = mixture.GMM(n_components=nstates)\n gmm.fit(collected_observations[:, None])\n from bhmm import GaussianOutputModel\n output_model = GaussianOutputModel(nstates, means=gmm.means_[:, 0],\n sigmas=np.sqrt(gmm.covars_[:, 0]))\n logger().info('Gaussian output model:\\n' + str(output_model))\n Pi = np.zeros([nstates], np.float64)\n Pi[:] = gmm.weights_[:]\n logger().info('GMM weights: %s' % str(gmm.weights_))\n Nij = np.zeros([nstates, nstates], np.float64)\n for o_t in observations:\n T = o_t.shape[0]\n pobs = output_model.p_obs(o_t)\n pobs /= pobs.sum(axis=1)[:, None]\n for t in range(T - 1):\n Nij[:, :] = Nij[:, :] + np.outer(pobs[t, :], pobs[t + 1, :])\n logger().info('Nij\\n' + str(Nij))\n import msmtools.estimation as msmest\n import msmtools.analysis as msmana\n Tij = msmest.transition_matrix(Nij, reversible=reversible)\n pi = msmana.stationary_distribution(Tij)\n model = HMM(pi, Tij, output_model)\n return model\n",
"role": "user"
},
{
"content": "init_model_gaussian1d",
"role": "assistant"
}
] |
def x(self, o):
if self.__impl__ == self.__IMPL_C__:
return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o))
elif self.__impl__ == self.__IMPL_PYTHON__:
if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps):
raise RuntimeError('at least one sigma is too small to continue.')
C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas)
Pobs = C * np.exp(-0.5 * ((o - self.means) / self.sigmas) ** 2)
return Pobs
else:
raise RuntimeError('Implementation ' + str(self.__impl__) +
' not available')
|
p_o
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, o):\n if self.__impl__ == self.__IMPL_C__:\n return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o))\n elif self.__impl__ == self.__IMPL_PYTHON__:\n if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps):\n raise RuntimeError('at least one sigma is too small to continue.')\n C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas)\n Pobs = C * np.exp(-0.5 * ((o - self.means) / self.sigmas) ** 2)\n return Pobs\n else:\n raise RuntimeError('Implementation ' + str(self.__impl__) +\n ' not available')\n",
"role": "user"
},
{
"content": "p_o",
"role": "assistant"
}
] |
def x(self, obs, out=None):
if self.__impl__ == self.__IMPL_C__:
res = gc.p_obs(obs, self.means, self.sigmas, out=out, dtype=config.
dtype)
return self._handle_outliers(res)
elif self.__impl__ == self.__IMPL_PYTHON__:
T = len(obs)
if out is None:
res = np.zeros((T, self.nstates), dtype=config.dtype)
else:
res = out
for t in range(T):
res[t, :] = self._p_o(obs[t])
return self._handle_outliers(res)
else:
raise RuntimeError('Implementation ' + str(self.__impl__) +
' not available')
|
p_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, obs, out=None):\n if self.__impl__ == self.__IMPL_C__:\n res = gc.p_obs(obs, self.means, self.sigmas, out=out, dtype=config.\n dtype)\n return self._handle_outliers(res)\n elif self.__impl__ == self.__IMPL_PYTHON__:\n T = len(obs)\n if out is None:\n res = np.zeros((T, self.nstates), dtype=config.dtype)\n else:\n res = out\n for t in range(T):\n res[t, :] = self._p_o(obs[t])\n return self._handle_outliers(res)\n else:\n raise RuntimeError('Implementation ' + str(self.__impl__) +\n ' not available')\n",
"role": "user"
},
{
"content": "p_obs",
"role": "assistant"
}
] |
def x(self, observations, weights):
N = self.nstates
K = len(observations)
self._means = np.zeros(N)
w_sum = np.zeros(N)
for k in range(K):
for i in range(N):
self.means[i] += np.dot(weights[k][:, i], observations[k])
w_sum += np.sum(weights[k], axis=0)
self._means /= w_sum
self._sigmas = np.zeros(N)
w_sum = np.zeros(N)
for k in range(K):
for i in range(N):
Y = (observations[k] - self.means[i]) ** 2
self.sigmas[i] += np.dot(weights[k][:, i], Y)
w_sum += np.sum(weights[k], axis=0)
self._sigmas /= w_sum
self._sigmas = np.sqrt(self.sigmas)
if np.any(self._sigmas < np.finfo(self._sigmas.dtype).eps):
raise RuntimeError('at least one sigma is too small to continue.')
|
estimate
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, observations, weights):\n N = self.nstates\n K = len(observations)\n self._means = np.zeros(N)\n w_sum = np.zeros(N)\n for k in range(K):\n for i in range(N):\n self.means[i] += np.dot(weights[k][:, i], observations[k])\n w_sum += np.sum(weights[k], axis=0)\n self._means /= w_sum\n self._sigmas = np.zeros(N)\n w_sum = np.zeros(N)\n for k in range(K):\n for i in range(N):\n Y = (observations[k] - self.means[i]) ** 2\n self.sigmas[i] += np.dot(weights[k][:, i], Y)\n w_sum += np.sum(weights[k], axis=0)\n self._sigmas /= w_sum\n self._sigmas = np.sqrt(self.sigmas)\n if np.any(self._sigmas < np.finfo(self._sigmas.dtype).eps):\n raise RuntimeError('at least one sigma is too small to continue.')\n",
"role": "user"
},
{
"content": "estimate",
"role": "assistant"
}
] |
def x(self, observations, prior=None):
for state_index in range(self.nstates):
observations_in_state = observations[state_index]
nsamples_in_state = len(observations_in_state)
if nsamples_in_state == 0:
logger().warn('Warning: State %d has no observations.' %
state_index)
if nsamples_in_state > 0:
self.means[state_index] = np.random.randn() * self.sigmas[
state_index] / np.sqrt(nsamples_in_state) + np.mean(
observations_in_state)
if nsamples_in_state > 1:
chisquared = np.random.chisquare(nsamples_in_state - 1)
sigmahat2 = np.mean((observations_in_state - self.means[
state_index]) ** 2)
self.sigmas[state_index] = np.sqrt(sigmahat2) / np.sqrt(
chisquared / nsamples_in_state)
return
|
sample
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, observations, prior=None):\n for state_index in range(self.nstates):\n observations_in_state = observations[state_index]\n nsamples_in_state = len(observations_in_state)\n if nsamples_in_state == 0:\n logger().warn('Warning: State %d has no observations.' %\n state_index)\n if nsamples_in_state > 0:\n self.means[state_index] = np.random.randn() * self.sigmas[\n state_index] / np.sqrt(nsamples_in_state) + np.mean(\n observations_in_state)\n if nsamples_in_state > 1:\n chisquared = np.random.chisquare(nsamples_in_state - 1)\n sigmahat2 = np.mean((observations_in_state - self.means[\n state_index]) ** 2)\n self.sigmas[state_index] = np.sqrt(sigmahat2) / np.sqrt(\n chisquared / nsamples_in_state)\n return\n",
"role": "user"
},
{
"content": "sample",
"role": "assistant"
}
] |
def x(self, state_index):
observation = self.sigmas[state_index] * np.random.randn() + self.means[
state_index]
return observation
|
generate_observation_from_state
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, state_index):\n observation = self.sigmas[state_index] * np.random.randn() + self.means[\n state_index]\n return observation\n",
"role": "user"
},
{
"content": "generate_observation_from_state",
"role": "assistant"
}
] |
def x(self, state_index, nobs):
observations = self.sigmas[state_index] * np.random.randn(nobs
) + self.means[state_index]
return observations
|
generate_observations_from_state
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, state_index, nobs):\n observations = self.sigmas[state_index] * np.random.randn(nobs\n ) + self.means[state_index]\n return observations\n",
"role": "user"
},
{
"content": "generate_observations_from_state",
"role": "assistant"
}
] |
def x(self, s_t):
T = s_t.shape[0]
o_t = np.zeros([T], dtype=config.dtype)
for t in range(T):
s = s_t[t]
o_t[t] = self.sigmas[s] * np.random.randn() + self.means[s]
return o_t
|
generate_observation_trajectory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, s_t):\n T = s_t.shape[0]\n o_t = np.zeros([T], dtype=config.dtype)\n for t in range(T):\n s = s_t[t]\n o_t[t] = self.sigmas[s] * np.random.randn() + self.means[s]\n return o_t\n",
"role": "user"
},
{
"content": "generate_observation_trajectory",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].stationary_distribution
return res
|
initial_distribution_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)\n for i in range(self.nsamples):\n res[i, :] = self._sampled_hmms[i].stationary_distribution\n return res\n",
"role": "user"
},
{
"content": "initial_distribution_samples",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=
config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].transition_matrix
return res
|
transition_matrix_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=\n config.dtype)\n for i in range(self.nsamples):\n res[i, :, :] = self._sampled_hmms[i].transition_matrix\n return res\n",
"role": "user"
},
{
"content": "transition_matrix_samples",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].eigenvalues
return res
|
eigenvalues_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)\n for i in range(self.nsamples):\n res[i, :] = self._sampled_hmms[i].eigenvalues\n return res\n",
"role": "user"
},
{
"content": "eigenvalues_samples",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=
config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].eigenvectors_left
return res
|
eigenvectors_left_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=\n config.dtype)\n for i in range(self.nsamples):\n res[i, :, :] = self._sampled_hmms[i].eigenvectors_left\n return res\n",
"role": "user"
},
{
"content": "eigenvectors_left_samples",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=
config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].eigenvectors_right
return res
|
eigenvectors_right_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=\n config.dtype)\n for i in range(self.nsamples):\n res[i, :, :] = self._sampled_hmms[i].eigenvectors_right\n return res\n",
"role": "user"
},
{
"content": "eigenvectors_right_samples",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates - 1), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].timescales
return res
|
timescales_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates - 1), dtype=config.dtype)\n for i in range(self.nsamples):\n res[i, :] = self._sampled_hmms[i].timescales\n return res\n",
"role": "user"
},
{
"content": "timescales_samples",
"role": "assistant"
}
] |
def x(self):
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].lifetimes
return res
|
lifetimes_samples
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)\n for i in range(self.nsamples):\n res[i, :] = self._sampled_hmms[i].lifetimes\n return res\n",
"role": "user"
},
{
"content": "lifetimes_samples",
"role": "assistant"
}
] |
def x(self, impl):
if impl.lower() == 'python':
self.__impl__ = self.__IMPL_PYTHON__
elif impl.lower() == 'c':
self.__impl__ = self.__IMPL_C__
else:
import warnings
warnings.warn('Implementation ' + impl +
' is not known. Using the fallback python implementation.')
self.__impl__ = self.__IMPL_PYTHON__
|
set_implementation
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, impl):\n if impl.lower() == 'python':\n self.__impl__ = self.__IMPL_PYTHON__\n elif impl.lower() == 'c':\n self.__impl__ = self.__IMPL_C__\n else:\n import warnings\n warnings.warn('Implementation ' + impl +\n ' is not known. Using the fallback python implementation.')\n self.__impl__ = self.__IMPL_PYTHON__\n",
"role": "user"
},
{
"content": "set_implementation",
"role": "assistant"
}
] |
def x(self, obs, out=None, dtype=np.float32):
if out is None:
return np.log(self.p_obs(obs))
else:
self.p_obs(obs, out=out, dtype=dtype)
np.log(out, out=out)
return out
|
log_p_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, obs, out=None, dtype=np.float32):\n if out is None:\n return np.log(self.p_obs(obs))\n else:\n self.p_obs(obs, out=out, dtype=dtype)\n np.log(out, out=out)\n return out\n",
"role": "user"
},
{
"content": "log_p_obs",
"role": "assistant"
}
] |
def x(self, p_o):
if self.ignore_outliers:
outliers = np.where(p_o.sum(axis=1) == 0)[0]
if outliers.size > 0:
p_o[outliers, :] = 1.0
self.found_outliers = True
return p_o
|
handle_outliers
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, p_o):\n if self.ignore_outliers:\n outliers = np.where(p_o.sum(axis=1) == 0)[0]\n if outliers.size > 0:\n p_o[outliers, :] = 1.0\n self.found_outliers = True\n return p_o\n",
"role": "user"
},
{
"content": "handle_outliers",
"role": "assistant"
}
] |
def x(A, pobs, pi, T=None, alpha_out=None, dtype=np.float32):
if T is None:
T = pobs.shape[0]
elif T > pobs.shape[0]:
raise ValueError('T must be at most the length of pobs.')
N = A.shape[0]
if alpha_out is None:
alpha_out = np.zeros((T, N), dtype=dtype)
elif T > alpha_out.shape[0]:
raise ValueError(
'alpha_out must at least have length T in order to fit trajectory.'
)
logprob = 0.0
np.multiply(pi, pobs[0, :], out=alpha_out[0])
scale = np.sum(alpha_out[0, :])
alpha_out[0, :] /= scale
logprob += np.log(scale)
for t in range(T - 1):
np.multiply(np.dot(alpha_out[t, :], A), pobs[t + 1, :], out=
alpha_out[t + 1])
scale = np.sum(alpha_out[t + 1, :])
alpha_out[t + 1, :] /= scale
logprob += np.log(scale)
return logprob, alpha_out
|
forward
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(A, pobs, pi, T=None, alpha_out=None, dtype=np.float32):\n if T is None:\n T = pobs.shape[0]\n elif T > pobs.shape[0]:\n raise ValueError('T must be at most the length of pobs.')\n N = A.shape[0]\n if alpha_out is None:\n alpha_out = np.zeros((T, N), dtype=dtype)\n elif T > alpha_out.shape[0]:\n raise ValueError(\n 'alpha_out must at least have length T in order to fit trajectory.'\n )\n logprob = 0.0\n np.multiply(pi, pobs[0, :], out=alpha_out[0])\n scale = np.sum(alpha_out[0, :])\n alpha_out[0, :] /= scale\n logprob += np.log(scale)\n for t in range(T - 1):\n np.multiply(np.dot(alpha_out[t, :], A), pobs[t + 1, :], out=\n alpha_out[t + 1])\n scale = np.sum(alpha_out[t + 1, :])\n alpha_out[t + 1, :] /= scale\n logprob += np.log(scale)\n return logprob, alpha_out\n",
"role": "user"
},
{
"content": "forward",
"role": "assistant"
}
] |
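For context on the scaled forward recursion in the entry above, here is a minimal standalone sketch on a toy 2-state HMM. All values (A, pi, pobs) are hypothetical and chosen only for illustration; the recursion mirrors the kernel, rescaling alpha at every step so the log-likelihood is accumulated from the scale factors instead of underflowing.

import numpy as np

# Toy 2-state HMM (hypothetical values, for illustration only).
A = np.array([[0.9, 0.1],
              [0.2, 0.8]])          # transition matrix
pi = np.array([0.5, 0.5])           # initial distribution
pobs = np.array([[0.7, 0.1],        # per-step observation likelihoods
                 [0.6, 0.2],
                 [0.1, 0.9]])

T, N = pobs.shape
alpha = np.zeros((T, N))
logprob = 0.0

# Scaled forward recursion, as in the kernel above.
alpha[0] = pi * pobs[0]
scale = alpha[0].sum()
alpha[0] /= scale
logprob += np.log(scale)
for t in range(T - 1):
    alpha[t + 1] = alpha[t].dot(A) * pobs[t + 1]
    scale = alpha[t + 1].sum()
    alpha[t + 1] /= scale
    logprob += np.log(scale)

print(logprob)  # log-likelihood of the toy observation sequence

Rescaling each row of alpha keeps the numbers in a safe floating-point range for long trajectories, which is why the kernel returns the log-probability as a running sum of log scale factors.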
def x(A, pobs, T=None, beta_out=None, dtype=np.float32):
if T is None:
T = pobs.shape[0]
elif T > pobs.shape[0]:
raise ValueError('T must be at most the length of pobs.')
N = A.shape[0]
if beta_out is None:
beta_out = np.zeros((T, N), dtype=dtype)
elif T > beta_out.shape[0]:
raise ValueError(
'beta_out must at least have length T in order to fit trajectory.')
beta_out[T - 1, :] = 1.0
scale = np.sum(beta_out[T - 1, :])
beta_out[T - 1, :] /= scale
for t in range(T - 2, -1, -1):
np.dot(A, beta_out[t + 1, :] * pobs[t + 1, :], out=beta_out[t, :])
scale = np.sum(beta_out[t, :])
beta_out[t, :] /= scale
return beta_out
|
backward
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(A, pobs, T=None, beta_out=None, dtype=np.float32):\n if T is None:\n T = pobs.shape[0]\n elif T > pobs.shape[0]:\n raise ValueError('T must be at most the length of pobs.')\n N = A.shape[0]\n if beta_out is None:\n beta_out = np.zeros((T, N), dtype=dtype)\n elif T > beta_out.shape[0]:\n raise ValueError(\n 'beta_out must at least have length T in order to fit trajectory.')\n beta_out[T - 1, :] = 1.0\n scale = np.sum(beta_out[T - 1, :])\n beta_out[T - 1, :] /= scale\n for t in range(T - 2, -1, -1):\n np.dot(A, beta_out[t + 1, :] * pobs[t + 1, :], out=beta_out[t, :])\n scale = np.sum(beta_out[t, :])\n beta_out[t, :] /= scale\n return beta_out\n",
"role": "user"
},
{
"content": "backward",
"role": "assistant"
}
] |
def x(alpha, beta, A, pobs, T=None, out=None, dtype=np.float32):
if T is None:
T = pobs.shape[0]
elif T > pobs.shape[0]:
raise ValueError('T must be at most the length of pobs.')
N = len(A)
if out is None:
out = np.zeros((N, N), dtype=dtype, order='C')
else:
out[:] = 0.0
xi = np.zeros((N, N), dtype=dtype, order='C')
for t in range(T - 1):
np.dot(alpha[t, :][:, None] * A, np.diag(pobs[t + 1, :] * beta[t +
1, :]), out=xi)
xi /= np.sum(xi)
np.add(out, xi, out)
return out
|
transition_counts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alpha, beta, A, pobs, T=None, out=None, dtype=np.float32):\n if T is None:\n T = pobs.shape[0]\n elif T > pobs.shape[0]:\n raise ValueError('T must be at most the length of pobs.')\n N = len(A)\n if out is None:\n out = np.zeros((N, N), dtype=dtype, order='C')\n else:\n out[:] = 0.0\n xi = np.zeros((N, N), dtype=dtype, order='C')\n for t in range(T - 1):\n np.dot(alpha[t, :][:, None] * A, np.diag(pobs[t + 1, :] * beta[t + \n 1, :]), out=xi)\n xi /= np.sum(xi)\n np.add(out, xi, out)\n return out\n",
"role": "user"
},
{
"content": "transition_counts",
"role": "assistant"
}
] |
def x(A, pobs, pi, dtype=np.float32):
T, N = pobs.shape[0], pobs.shape[1]
psi = np.zeros((T, N), dtype=int)
v = pi * pobs[0, :]
v /= v.sum()
psi[0] = 0.0
for t in range(1, T):
vA = np.dot(np.diag(v), A)
v = pobs[t, :] * np.max(vA, axis=0)
v /= v.sum()
psi[t] = np.argmax(vA, axis=0)
q = np.zeros(T, dtype=int)
q[T - 1] = np.argmax(v)
for t in range(T - 2, -1, -1):
q[t] = psi[t + 1, q[t + 1]]
return q
|
viterbi
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(A, pobs, pi, dtype=np.float32):\n T, N = pobs.shape[0], pobs.shape[1]\n psi = np.zeros((T, N), dtype=int)\n v = pi * pobs[0, :]\n v /= v.sum()\n psi[0] = 0.0\n for t in range(1, T):\n vA = np.dot(np.diag(v), A)\n v = pobs[t, :] * np.max(vA, axis=0)\n v /= v.sum()\n psi[t] = np.argmax(vA, axis=0)\n q = np.zeros(T, dtype=int)\n q[T - 1] = np.argmax(v)\n for t in range(T - 2, -1, -1):\n q[t] = psi[t + 1, q[t + 1]]\n return q\n",
"role": "user"
},
{
"content": "viterbi",
"role": "assistant"
}
] |
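As a companion to the Viterbi entry above, the sketch below decodes the same kind of toy problem in log space, a common alternative to the rescaled formulation used in the kernel. Again, every input value is hypothetical and only illustrates the shapes the kernel expects.

import numpy as np

# Hypothetical toy inputs (same shapes as the kernel above expects).
A = np.array([[0.9, 0.1],
              [0.2, 0.8]])
pi = np.array([0.5, 0.5])
pobs = np.array([[0.7, 0.1],
                 [0.6, 0.2],
                 [0.1, 0.9]])

T, N = pobs.shape
logA, logpobs = np.log(A), np.log(pobs)

# Log-space Viterbi recursion.
delta = np.zeros((T, N))
psi = np.zeros((T, N), dtype=int)
delta[0] = np.log(pi) + logpobs[0]
for t in range(1, T):
    cand = delta[t - 1][:, None] + logA    # cand[i, j]: best score ending in j via predecessor i
    psi[t] = np.argmax(cand, axis=0)
    delta[t] = np.max(cand, axis=0) + logpobs[t]

# Backtrace the most likely hidden path.
q = np.zeros(T, dtype=int)
q[T - 1] = np.argmax(delta[T - 1])
for t in range(T - 2, -1, -1):
    q[t] = psi[t + 1, q[t + 1]]

print(q)  # most likely hidden state sequence for the toy data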
def x(alpha, A, pobs, T=None, dtype=np.float32):
N = pobs.shape[1]
if T is None:
T = pobs.shape[0]
elif T > pobs.shape[0] or T > alpha.shape[0]:
raise ValueError('T must be at most the length of pobs and alpha.')
S = np.zeros(T, dtype=int)
psel = alpha[T - 1, :]
psel /= psel.sum()
S[T - 1] = np.random.choice(range(N), size=1, p=psel)
for t in range(T - 2, -1, -1):
psel = alpha[t, :] * A[:, S[t + 1]]
psel /= psel.sum()
S[t] = np.random.choice(range(N), size=1, p=psel)
return S
|
sample_path
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alpha, A, pobs, T=None, dtype=np.float32):\n N = pobs.shape[1]\n if T is None:\n T = pobs.shape[0]\n elif T > pobs.shape[0] or T > alpha.shape[0]:\n raise ValueError('T must be at most the length of pobs and alpha.')\n S = np.zeros(T, dtype=int)\n psel = alpha[T - 1, :]\n psel /= psel.sum()\n S[T - 1] = np.random.choice(range(N), size=1, p=psel)\n for t in range(T - 2, -1, -1):\n psel = alpha[t, :] * A[:, S[t + 1]]\n psel /= psel.sum()\n S[t] = np.random.choice(range(N), size=1, p=psel)\n return S\n",
"role": "user"
},
{
"content": "sample_path",
"role": "assistant"
}
] |
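The backward-sampling step in the entry above draws one hidden path conditioned on the forward variables. A minimal sketch under assumed toy inputs (a hand-written alpha with normalized rows and the same toy transition matrix) looks like this:

import numpy as np

# Hypothetical inputs: forward variables alpha (rows assumed normalized)
# and a toy transition matrix A.
A = np.array([[0.9, 0.1],
              [0.2, 0.8]])
alpha = np.array([[0.6, 0.4],
                  [0.7, 0.3],
                  [0.2, 0.8]])

T, N = alpha.shape
rng = np.random.default_rng(0)

# Backward sampling of one hidden path, as in the kernel above.
S = np.zeros(T, dtype=int)
p = alpha[T - 1] / alpha[T - 1].sum()
S[T - 1] = rng.choice(N, p=p)
for t in range(T - 2, -1, -1):
    p = alpha[t] * A[:, S[t + 1]]
    p /= p.sum()
    S[t] = rng.choice(N, p=p)

print(S)  # one sampled hidden trajectory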
def x(P, M):
W = np.linalg.inv(np.dot(M.T, M))
A = np.dot(np.dot(M.T, P), M)
P_coarse = np.dot(W, A)
P_coarse = np.maximum(P_coarse, 0)
P_coarse /= P_coarse.sum(axis=1)[:, None]
return P_coarse
|
coarse_grain_transition_matrix
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(P, M):\n W = np.linalg.inv(np.dot(M.T, M))\n A = np.dot(np.dot(M.T, P), M)\n P_coarse = np.dot(W, A)\n P_coarse = np.maximum(P_coarse, 0)\n P_coarse /= P_coarse.sum(axis=1)[:, None]\n return P_coarse\n",
"role": "user"
},
{
"content": "coarse_grain_transition_matrix",
"role": "assistant"
}
] |
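The coarse-graining entry above projects a fine transition matrix onto membership vectors. The sketch below applies the same projection, (M^T M)^{-1} M^T P M followed by clipping and row normalization, to a hypothetical 3-state matrix lumped into two coarse states; the numbers are invented purely to show the shapes involved.

import numpy as np

# Hypothetical 3-state transition matrix and a membership matrix M that
# lumps states {0, 1} and {2} into two coarse states.
P = np.array([[0.80, 0.15, 0.05],
              [0.10, 0.85, 0.05],
              [0.05, 0.05, 0.90]])
M = np.array([[1.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0]])

# Same projection as the kernel above, then clip negatives and renormalize rows.
W = np.linalg.inv(M.T @ M)
P_coarse = W @ (M.T @ P @ M)
P_coarse = np.maximum(P_coarse, 0)
P_coarse /= P_coarse.sum(axis=1)[:, None]

print(P_coarse)  # row-stochastic 2x2 coarse-grained matrix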
def x(p0, P, reversible=True, stationary=False, C=None, eps=None):
n = P.shape[0]
if eps is None:
eps = 0.01 / n
P = np.maximum(P, eps)
P /= P.sum(axis=1)[:, None]
if reversible:
P = _tmatrix_disconnected.enforce_reversible_on_closed(P)
if stationary:
_tmatrix_disconnected.stationary_distribution(P, C=C)
else:
p0 = np.maximum(p0, eps)
p0 /= p0.sum()
return p0, P
|
regularize_hidden
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(p0, P, reversible=True, stationary=False, C=None, eps=None):\n n = P.shape[0]\n if eps is None:\n eps = 0.01 / n\n P = np.maximum(P, eps)\n P /= P.sum(axis=1)[:, None]\n if reversible:\n P = _tmatrix_disconnected.enforce_reversible_on_closed(P)\n if stationary:\n _tmatrix_disconnected.stationary_distribution(P, C=C)\n else:\n p0 = np.maximum(p0, eps)\n p0 /= p0.sum()\n return p0, P\n",
"role": "user"
},
{
"content": "regularize_hidden",
"role": "assistant"
}
] |
def x(B, nonempty=None, separate=None, eps=None):
B = B.copy()
n, m = B.shape
if eps is None:
eps = 0.01 / m
if nonempty is None:
nonempty = np.arange(m)
if separate is None:
B[:, nonempty] = np.maximum(B[:, nonempty], eps)
else:
nonempty_nonseparate = np.array(list(set(nonempty) - set(separate)),
dtype=int)
nonempty_separate = np.array(list(set(nonempty).intersection(set(
separate))), dtype=int)
B[:n - 1, nonempty_nonseparate] = np.maximum(B[:n - 1,
nonempty_nonseparate], eps)
B[n - 1, nonempty_separate] = np.maximum(B[n - 1, nonempty_separate
], eps)
B /= B.sum(axis=1)[:, None]
return B
|
regularize_pobs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(B, nonempty=None, separate=None, eps=None):\n B = B.copy()\n n, m = B.shape\n if eps is None:\n eps = 0.01 / m\n if nonempty is None:\n nonempty = np.arange(m)\n if separate is None:\n B[:, nonempty] = np.maximum(B[:, nonempty], eps)\n else:\n nonempty_nonseparate = np.array(list(set(nonempty) - set(separate)),\n dtype=int)\n nonempty_separate = np.array(list(set(nonempty).intersection(set(\n separate))), dtype=int)\n B[:n - 1, nonempty_nonseparate] = np.maximum(B[:n - 1,\n nonempty_nonseparate], eps)\n B[n - 1, nonempty_separate] = np.maximum(B[n - 1, nonempty_separate\n ], eps)\n B /= B.sum(axis=1)[:, None]\n return B\n",
"role": "user"
},
{
"content": "regularize_pobs",
"role": "assistant"
}
] |
def x(C_full, nstates, reversible=True, stationary=True, active_set=None, P
=None, eps_A=None, eps_B=None, separate=None):
nfull = C_full.shape[0]
if eps_A is None:
eps_A = 0.01 / nstates
if eps_B is None:
eps_B = 0.01 / nfull
symsum = C_full.sum(axis=0) + C_full.sum(axis=1)
nonempty = np.where(symsum > 0)[0]
if active_set is None:
active_set = nonempty
elif np.any(symsum[active_set] == 0):
raise ValueError('Given active set has empty states')
if P is not None:
if np.shape(P)[0] != active_set.size:
raise ValueError('Given initial transition matrix P has shape ' +
str(np.shape(P)) + 'while active set has size ' + str(
active_set.size))
if separate is None:
active_nonseparate = active_set.copy()
nmeta = nstates
else:
if np.max(separate) >= nfull:
raise ValueError(
'Separate set has indexes that do not exist in full state space: '
+ str(np.max(separate)))
active_nonseparate = np.array(list(set(active_set) - set(separate)))
nmeta = nstates - 1
if active_nonseparate.size < nmeta:
raise NotImplementedError('Trying to initialize ' + str(nmeta) +
'-state HMM from smaller ' + str(active_nonseparate.size) +
'-state MSM.')
C_active = C_full[np.ix_(active_set, active_set)]
if P is None:
P_active = _tmatrix_disconnected.estimate_P(C_active, reversible=
reversible, maxiter=10000)
else:
P_active = P
pi_active = _tmatrix_disconnected.stationary_distribution(P_active, C=
C_active)
pi_full = np.zeros(nfull)
pi_full[active_set] = pi_active
C_active_nonseparate = C_full[np.ix_(active_nonseparate,
active_nonseparate)]
if reversible and separate is None:
P_active_nonseparate = P_active
else:
P_active_nonseparate = _tmatrix_disconnected.estimate_P(
C_active_nonseparate, reversible=True)
if active_nonseparate.size > nmeta:
from msmtools.analysis.dense.pcca import PCCA
pcca_obj = PCCA(P_active_nonseparate, nmeta)
M_active_nonseparate = pcca_obj.memberships
B_active_nonseparate = pcca_obj.output_probabilities
else:
M_active_nonseparate = np.eye(nmeta)
B_active_nonseparate = np.eye(nmeta)
if separate is None:
M_active = M_active_nonseparate
else:
M_full = np.zeros((nfull, nstates))
M_full[active_nonseparate, :nmeta] = M_active_nonseparate
M_full[separate, -1] = 1
M_active = M_full[active_set]
P_hmm = coarse_grain_transition_matrix(P_active, M_active)
if reversible:
P_hmm = _tmatrix_disconnected.enforce_reversible_on_closed(P_hmm)
C_hmm = M_active.T.dot(C_active).dot(M_active)
pi_hmm = _tmatrix_disconnected.stationary_distribution(P_hmm, C=C_hmm)
B_hmm = np.zeros((nstates, nfull))
B_hmm[:nmeta, active_nonseparate] = B_active_nonseparate
if separate is not None:
B_hmm[-1, separate] = pi_full[separate]
pi_hmm, P_hmm = regularize_hidden(pi_hmm, P_hmm, reversible=reversible,
stationary=stationary, C=C_hmm, eps=eps_A)
B_hmm = regularize_pobs(B_hmm, nonempty=nonempty, separate=separate,
eps=eps_B)
logger().info('Initial model: ')
logger().info('initial distribution = \n' + str(pi_hmm))
logger().info('transition matrix = \n' + str(P_hmm))
logger().info('output matrix = \n' + str(B_hmm.T))
return pi_hmm, P_hmm, B_hmm
|
init_discrete_hmm_spectral
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(C_full, nstates, reversible=True, stationary=True, active_set=None, P\n =None, eps_A=None, eps_B=None, separate=None):\n nfull = C_full.shape[0]\n if eps_A is None:\n eps_A = 0.01 / nstates\n if eps_B is None:\n eps_B = 0.01 / nfull\n symsum = C_full.sum(axis=0) + C_full.sum(axis=1)\n nonempty = np.where(symsum > 0)[0]\n if active_set is None:\n active_set = nonempty\n elif np.any(symsum[active_set] == 0):\n raise ValueError('Given active set has empty states')\n if P is not None:\n if np.shape(P)[0] != active_set.size:\n raise ValueError('Given initial transition matrix P has shape ' +\n str(np.shape(P)) + 'while active set has size ' + str(\n active_set.size))\n if separate is None:\n active_nonseparate = active_set.copy()\n nmeta = nstates\n else:\n if np.max(separate) >= nfull:\n raise ValueError(\n 'Separate set has indexes that do not exist in full state space: '\n + str(np.max(separate)))\n active_nonseparate = np.array(list(set(active_set) - set(separate)))\n nmeta = nstates - 1\n if active_nonseparate.size < nmeta:\n raise NotImplementedError('Trying to initialize ' + str(nmeta) +\n '-state HMM from smaller ' + str(active_nonseparate.size) +\n '-state MSM.')\n C_active = C_full[np.ix_(active_set, active_set)]\n if P is None:\n P_active = _tmatrix_disconnected.estimate_P(C_active, reversible=\n reversible, maxiter=10000)\n else:\n P_active = P\n pi_active = _tmatrix_disconnected.stationary_distribution(P_active, C=\n C_active)\n pi_full = np.zeros(nfull)\n pi_full[active_set] = pi_active\n C_active_nonseparate = C_full[np.ix_(active_nonseparate,\n active_nonseparate)]\n if reversible and separate is None:\n P_active_nonseparate = P_active\n else:\n P_active_nonseparate = _tmatrix_disconnected.estimate_P(\n C_active_nonseparate, reversible=True)\n if active_nonseparate.size > nmeta:\n from msmtools.analysis.dense.pcca import PCCA\n pcca_obj = PCCA(P_active_nonseparate, nmeta)\n M_active_nonseparate = pcca_obj.memberships\n B_active_nonseparate = pcca_obj.output_probabilities\n else:\n M_active_nonseparate = np.eye(nmeta)\n B_active_nonseparate = np.eye(nmeta)\n if separate is None:\n M_active = M_active_nonseparate\n else:\n M_full = np.zeros((nfull, nstates))\n M_full[active_nonseparate, :nmeta] = M_active_nonseparate\n M_full[separate, -1] = 1\n M_active = M_full[active_set]\n P_hmm = coarse_grain_transition_matrix(P_active, M_active)\n if reversible:\n P_hmm = _tmatrix_disconnected.enforce_reversible_on_closed(P_hmm)\n C_hmm = M_active.T.dot(C_active).dot(M_active)\n pi_hmm = _tmatrix_disconnected.stationary_distribution(P_hmm, C=C_hmm)\n B_hmm = np.zeros((nstates, nfull))\n B_hmm[:nmeta, active_nonseparate] = B_active_nonseparate\n if separate is not None:\n B_hmm[-1, separate] = pi_full[separate]\n pi_hmm, P_hmm = regularize_hidden(pi_hmm, P_hmm, reversible=reversible,\n stationary=stationary, C=C_hmm, eps=eps_A)\n B_hmm = regularize_pobs(B_hmm, nonempty=nonempty, separate=separate,\n eps=eps_B)\n logger().info('Initial model: ')\n logger().info('initial distribution = \\n' + str(pi_hmm))\n logger().info('transition matrix = \\n' + str(P_hmm))\n logger().info('output matrix = \\n' + str(B_hmm.T))\n return pi_hmm, P_hmm, B_hmm\n",
"role": "user"
},
{
"content": "init_discrete_hmm_spectral",
"role": "assistant"
}
] |
def x(C_full, nstates, reversible=True, stationary=True, active_set=None, P
=None, eps_A=None, eps_B=None, separate=None):
raise NotImplementedError('ML-initialization not yet implemented')
|
init_discrete_hmm_ml
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(C_full, nstates, reversible=True, stationary=True, active_set=None, P\n =None, eps_A=None, eps_B=None, separate=None):\n raise NotImplementedError('ML-initialization not yet implemented')\n",
"role": "user"
},
{
"content": "init_discrete_hmm_ml",
"role": "assistant"
}
] |
def x(self, Pi, Tij):
from msmtools import analysis as msmana
self._Tij = np.array(Tij)
assert msmana.is_transition_matrix(self._Tij
), 'Given transition matrix is not a stochastic matrix'
assert self._Tij.shape[0
] == self._nstates, 'Given transition matrix has unexpected number of states '
self._spectral_decomp_available = False
assert np.all(Pi >= 0
), 'Given initial distribution contains negative elements.'
assert np.any(Pi > 0), 'Given initial distribution is zero'
self._Pi = np.array(Pi) / np.sum(Pi)
|
update
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, Pi, Tij):\n from msmtools import analysis as msmana\n self._Tij = np.array(Tij)\n assert msmana.is_transition_matrix(self._Tij\n ), 'Given transition matrix is not a stochastic matrix'\n assert self._Tij.shape[0\n ] == self._nstates, 'Given transition matrix has unexpected number of states '\n self._spectral_decomp_available = False\n assert np.all(Pi >= 0\n ), 'Given initial distribution contains negative elements.'\n assert np.any(Pi > 0), 'Given initial distribution is zero'\n self._Pi = np.array(Pi) / np.sum(Pi)\n",
"role": "user"
},
{
"content": "update",
"role": "assistant"
}
] |
def x(self):
return np.allclose(np.dot(self._Pi, self._Tij), self._Pi)
|
is_stationary
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return np.allclose(np.dot(self._Pi, self._Tij), self._Pi)\n",
"role": "user"
},
{
"content": "is_stationary",
"role": "assistant"
}
] |
def x(self):
assert _tmatrix_disconnected.is_connected(self._Tij, strong=False
), 'No unique stationary distribution because transition matrix is not connected'
import msmtools.analysis as msmana
return msmana.stationary_distribution(self._Tij)
|
stationary_distribution
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n assert _tmatrix_disconnected.is_connected(self._Tij, strong=False\n ), 'No unique stationary distribution because transition matrix is not connected'\n import msmtools.analysis as msmana\n return msmana.stationary_distribution(self._Tij)\n",
"role": "user"
},
{
"content": "stationary_distribution",
"role": "assistant"
}
] |
def x(self):
from msmtools.analysis.dense.decomposition import timescales_from_eigenvalues as _timescales
self._ensure_spectral_decomposition()
ts = _timescales(self._eigenvalues, tau=self._lag)
return ts[1:]
|
timescales
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n from msmtools.analysis.dense.decomposition import timescales_from_eigenvalues as _timescales\n self._ensure_spectral_decomposition()\n ts = _timescales(self._eigenvalues, tau=self._lag)\n return ts[1:]\n",
"role": "user"
},
{
"content": "timescales",
"role": "assistant"
}
] |
def x(self):
return -self._lag / np.log(np.diag(self.transition_matrix))
|
lifetimes
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return -self._lag / np.log(np.diag(self.transition_matrix))\n",
"role": "user"
},
{
"content": "lifetimes",
"role": "assistant"
}
] |
def x(self, states):
pi_sub = self._Pi[states]
pi_sub /= pi_sub.sum()
P_sub = self._Tij[states, :][:, states]
assert np.all(P_sub.sum(axis=1) > 0
), 'Illegal sub_hmm request: transition matrix cannot be normalized on ' + str(
states)
P_sub /= P_sub.sum(axis=1)[:, None]
out_sub = self.output_model.sub_output_model(states)
return HMM(pi_sub, P_sub, out_sub, lag=self.lag)
|
sub_hmm
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, states):\n pi_sub = self._Pi[states]\n pi_sub /= pi_sub.sum()\n P_sub = self._Tij[states, :][:, states]\n assert np.all(P_sub.sum(axis=1) > 0\n ), 'Illegal sub_hmm request: transition matrix cannot be normalized on ' + str(\n states)\n P_sub /= P_sub.sum(axis=1)[:, None]\n out_sub = self.output_model.sub_output_model(states)\n return HMM(pi_sub, P_sub, out_sub, lag=self.lag)\n",
"role": "user"
},
{
"content": "sub_hmm",
"role": "assistant"
}
] |
def x(self):
if self.hidden_state_trajectories is None:
raise RuntimeError('HMM model does not have a hidden state trajectory.'
)
C = msmest.count_matrix(self.hidden_state_trajectories, 1, nstates=self
._nstates)
return C.toarray()
|
count_matrix
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.hidden_state_trajectories is None:\n raise RuntimeError('HMM model does not have a hidden state trajectory.'\n )\n C = msmest.count_matrix(self.hidden_state_trajectories, 1, nstates=self\n ._nstates)\n return C.toarray()\n",
"role": "user"
},
{
"content": "count_matrix",
"role": "assistant"
}
] |
def x(self):
if self.hidden_state_trajectories is None:
raise RuntimeError('HMM model does not have a hidden state trajectory.'
)
n = [traj[0] for traj in self.hidden_state_trajectories]
return np.bincount(n, minlength=self.nstates)
|
count_init
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.hidden_state_trajectories is None:\n raise RuntimeError('HMM model does not have a hidden state trajectory.'\n )\n n = [traj[0] for traj in self.hidden_state_trajectories]\n return np.bincount(n, minlength=self.nstates)\n",
"role": "user"
},
{
"content": "count_init",
"role": "assistant"
}
] |
def x(self, observations, state_index):
if not self.hidden_state_trajectories:
raise RuntimeError('HMM model does not have a hidden state trajectory.'
)
dtype = observations[0].dtype
collected_observations = np.array([], dtype=dtype)
for s_t, o_t in zip(self.hidden_state_trajectories, observations):
indices = np.where(s_t == state_index)[0]
collected_observations = np.append(collected_observations, o_t[indices]
)
return collected_observations
|
collect_observations_in_state
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, observations, state_index):\n if not self.hidden_state_trajectories:\n raise RuntimeError('HMM model does not have a hidden state trajectory.'\n )\n dtype = observations[0].dtype\n collected_observations = np.array([], dtype=dtype)\n for s_t, o_t in zip(self.hidden_state_trajectories, observations):\n indices = np.where(s_t == state_index)[0]\n collected_observations = np.append(collected_observations, o_t[indices]\n )\n return collected_observations\n",
"role": "user"
},
{
"content": "collect_observations_in_state",
"role": "assistant"
}
] |
def x(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32):
if initial_Pi is not None and start is not None:
raise ValueError(
'Arguments initial_Pi and start are exclusive. Only set one of them.'
)
if start is None:
if initial_Pi is not None:
start = np.random.choice(range(self._nstates), size=1, p=initial_Pi
)
else:
start = np.random.choice(range(self._nstates), size=1, p=self._Pi)
from msmtools import generation as msmgen
traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start,
stop=stop, dt=1)
return traj.astype(dtype)
|
generate_synthetic_state_trajectory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32):\n if initial_Pi is not None and start is not None:\n raise ValueError(\n 'Arguments initial_Pi and start are exclusive. Only set one of them.'\n )\n if start is None:\n if initial_Pi is not None:\n start = np.random.choice(range(self._nstates), size=1, p=initial_Pi\n )\n else:\n start = np.random.choice(range(self._nstates), size=1, p=self._Pi)\n from msmtools import generation as msmgen\n traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start,\n stop=stop, dt=1)\n return traj.astype(dtype)\n",
"role": "user"
},
{
"content": "generate_synthetic_state_trajectory",
"role": "assistant"
}
] |
def x(self, length, initial_Pi=None):
s_t = self.generate_synthetic_state_trajectory(length, initial_Pi=
initial_Pi)
o_t = self.output_model.generate_observation_trajectory(s_t)
return [o_t, s_t]
|
generate_synthetic_observation_trajectory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, length, initial_Pi=None):\n s_t = self.generate_synthetic_state_trajectory(length, initial_Pi=\n initial_Pi)\n o_t = self.output_model.generate_observation_trajectory(s_t)\n return [o_t, s_t]\n",
"role": "user"
},
{
"content": "generate_synthetic_observation_trajectory",
"role": "assistant"
}
] |
def x(self, ntrajectories, length, initial_Pi=None):
O = list()
S = list()
for trajectory_index in range(ntrajectories):
o_t, s_t = self.generate_synthetic_observation_trajectory(length=
length, initial_Pi=initial_Pi)
O.append(o_t)
S.append(s_t)
return O, S
|
generate_synthetic_observation_trajectories
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, ntrajectories, length, initial_Pi=None):\n O = list()\n S = list()\n for trajectory_index in range(ntrajectories):\n o_t, s_t = self.generate_synthetic_observation_trajectory(length=\n length, initial_Pi=initial_Pi)\n O.append(o_t)\n S.append(s_t)\n return O, S\n",
"role": "user"
},
{
"content": "generate_synthetic_observation_trajectories",
"role": "assistant"
}
] |
def x(nb_path):
exporter = python.PythonExporter()
output, resources = exporter.from_filename(nb_path)
return output
|
nb_to_python
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nb_path):\n exporter = python.PythonExporter()\n output, resources = exporter.from_filename(nb_path)\n return output\n",
"role": "user"
},
{
"content": "nb_to_python",
"role": "assistant"
}
] |
def x(nb_path):
exporter = html.HTMLExporter(template_file='full')
output, resources = exporter.from_filename(nb_path)
header = output.split('<head>', 1)[1].split('</head>', 1)[0]
body = output.split('<body>', 1)[1].split('</body>', 1)[0]
header = header.replace('<style', '<style scoped="scoped"')
header = header.replace(
'body {\n overflow: visible;\n padding: 8px;\n}\n', '')
filter_strings = ['navbar', 'body{', 'alert{', 'uneditable-input{',
'collapse{']
filter_strings.extend([('h%s{' % (i + 1)) for i in range(6)])
header_lines = filter(lambda x: not any([(s in x) for s in
filter_strings]), header.split('\n'))
header = '\n'.join(header_lines)
lines = ['<div class="ipynotebook">']
lines.append(header)
lines.append(body)
lines.append('</div>')
return '\n'.join(lines)
|
nb_to_html
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(nb_path):\n exporter = html.HTMLExporter(template_file='full')\n output, resources = exporter.from_filename(nb_path)\n header = output.split('<head>', 1)[1].split('</head>', 1)[0]\n body = output.split('<body>', 1)[1].split('</body>', 1)[0]\n header = header.replace('<style', '<style scoped=\"scoped\"')\n header = header.replace(\n 'body {\\n overflow: visible;\\n padding: 8px;\\n}\\n', '')\n filter_strings = ['navbar', 'body{', 'alert{', 'uneditable-input{',\n 'collapse{']\n filter_strings.extend([('h%s{' % (i + 1)) for i in range(6)])\n header_lines = filter(lambda x: not any([(s in x) for s in\n filter_strings]), header.split('\\n'))\n header = '\\n'.join(header_lines)\n lines = ['<div class=\"ipynotebook\">']\n lines.append(header)\n lines.append(body)\n lines.append('</div>')\n return '\\n'.join(lines)\n",
"role": "user"
},
{
"content": "nb_to_html",
"role": "assistant"
}
] |
def x(self, obs, out=None):
if out is None:
out = self._output_probabilities[:, obs].T
return self._handle_outliers(out)
else:
if obs.shape[0] == out.shape[0]:
np.copyto(out, self._output_probabilities[:, obs].T)
elif obs.shape[0] < out.shape[0]:
out[:obs.shape[0], :] = self._output_probabilities[:, obs].T
else:
raise ValueError('output array out is too small: ' + str(out.
shape[0]) + ' < ' + str(obs.shape[0]))
return self._handle_outliers(out)
|
p_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, obs, out=None):\n if out is None:\n out = self._output_probabilities[:, obs].T\n return self._handle_outliers(out)\n else:\n if obs.shape[0] == out.shape[0]:\n np.copyto(out, self._output_probabilities[:, obs].T)\n elif obs.shape[0] < out.shape[0]:\n out[:obs.shape[0], :] = self._output_probabilities[:, obs].T\n else:\n raise ValueError('output array out is too small: ' + str(out.\n shape[0]) + ' < ' + str(obs.shape[0]))\n return self._handle_outliers(out)\n",
"role": "user"
},
{
"content": "p_obs",
"role": "assistant"
}
] |
def x(self, observations, weights):
N, M = self._output_probabilities.shape
K = len(observations)
self._output_probabilities = np.zeros((N, M))
if self.__impl__ == self.__IMPL_C__:
for k in range(K):
dc.update_pout(observations[k], weights[k], self.
_output_probabilities, dtype=config.dtype)
elif self.__impl__ == self.__IMPL_PYTHON__:
for k in range(K):
for o in range(M):
times = np.where(observations[k] == o)[0]
self._output_probabilities[:, o] += np.sum(weights[k][times,
:], axis=0)
else:
raise RuntimeError('Implementation ' + str(self.__impl__) +
' not available')
self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[
:, None]
|
estimate
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, observations, weights):\n N, M = self._output_probabilities.shape\n K = len(observations)\n self._output_probabilities = np.zeros((N, M))\n if self.__impl__ == self.__IMPL_C__:\n for k in range(K):\n dc.update_pout(observations[k], weights[k], self.\n _output_probabilities, dtype=config.dtype)\n elif self.__impl__ == self.__IMPL_PYTHON__:\n for k in range(K):\n for o in range(M):\n times = np.where(observations[k] == o)[0]\n self._output_probabilities[:, o] += np.sum(weights[k][times,\n :], axis=0)\n else:\n raise RuntimeError('Implementation ' + str(self.__impl__) +\n ' not available')\n self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[\n :, None]\n",
"role": "user"
},
{
"content": "estimate",
"role": "assistant"
}
] |
def x(self, observations_by_state):
from numpy.random import dirichlet
N, M = self._output_probabilities.shape
for i, obs_by_state in enumerate(observations_by_state):
count = np.bincount(obs_by_state, minlength=M).astype(float)
count += self.prior[i]
positive = count > 0
self._output_probabilities[i, positive] = dirichlet(count[positive])
|
sample
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, observations_by_state):\n from numpy.random import dirichlet\n N, M = self._output_probabilities.shape\n for i, obs_by_state in enumerate(observations_by_state):\n count = np.bincount(obs_by_state, minlength=M).astype(float)\n count += self.prior[i]\n positive = count > 0\n self._output_probabilities[i, positive] = dirichlet(count[positive])\n",
"role": "user"
},
{
"content": "sample",
"role": "assistant"
}
] |
def x(self, state_index):
import scipy.stats
gen = scipy.stats.rv_discrete(values=(range(len(self.
_output_probabilities[state_index])), self._output_probabilities[
state_index]))
gen.rvs(size=1)
|
generate_observation_from_state
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, state_index):\n import scipy.stats\n gen = scipy.stats.rv_discrete(values=(range(len(self.\n _output_probabilities[state_index])), self._output_probabilities[\n state_index]))\n gen.rvs(size=1)\n",
"role": "user"
},
{
"content": "generate_observation_from_state",
"role": "assistant"
}
] |
def x(self, state_index, nobs):
import scipy.stats
gen = scipy.stats.rv_discrete(values=(range(self._nsymbols), self.
_output_probabilities[state_index]))
gen.rvs(size=nobs)
|
generate_observations_from_state
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, state_index, nobs):\n import scipy.stats\n gen = scipy.stats.rv_discrete(values=(range(self._nsymbols), self.\n _output_probabilities[state_index]))\n gen.rvs(size=nobs)\n",
"role": "user"
},
{
"content": "generate_observations_from_state",
"role": "assistant"
}
] |
def x(self, s_t, dtype=None):
if dtype is None:
dtype = np.int32
T = s_t.shape[0]
nsymbols = self._output_probabilities.shape[1]
if s_t.max() >= self.nstates or s_t.min() < 0:
msg = ''
msg += 's_t = %s\n' % s_t
msg += 's_t.min() = %d, s_t.max() = %d\n' % (s_t.min(), s_t.max())
msg += 's_t.argmax = %d\n' % s_t.argmax()
msg += 'self.nstates = %d\n' % self.nstates
msg += 's_t is out of bounds.\n'
raise Exception(msg)
o_t = np.zeros([T], dtype=dtype)
for t in range(T):
s = s_t[t]
o_t[t] = np.random.choice(nsymbols, p=self._output_probabilities[s, :])
return o_t
|
generate_observation_trajectory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, s_t, dtype=None):\n if dtype is None:\n dtype = np.int32\n T = s_t.shape[0]\n nsymbols = self._output_probabilities.shape[1]\n if s_t.max() >= self.nstates or s_t.min() < 0:\n msg = ''\n msg += 's_t = %s\\n' % s_t\n msg += 's_t.min() = %d, s_t.max() = %d\\n' % (s_t.min(), s_t.max())\n msg += 's_t.argmax = %d\\n' % s_t.argmax()\n msg += 'self.nstates = %d\\n' % self.nstates\n msg += 's_t is out of bounds.\\n'\n raise Exception(msg)\n o_t = np.zeros([T], dtype=dtype)\n for t in range(T):\n s = s_t[t]\n o_t[t] = np.random.choice(nsymbols, p=self._output_probabilities[s, :])\n return o_t\n",
"role": "user"
},
{
"content": "generate_observation_trajectory",
"role": "assistant"
}
] |
def x(impl):
global __impl__
if impl.lower() == 'python':
__impl__ = __IMPL_PYTHON__
elif impl.lower() == 'c':
__impl__ = __IMPL_C__
else:
import warnings
warnings.warn('Implementation ' + impl +
' is not known. Using the fallback python implementation.')
__impl__ = __IMPL_PYTHON__
|
set_implementation
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(impl):\n global __impl__\n if impl.lower() == 'python':\n __impl__ = __IMPL_PYTHON__\n elif impl.lower() == 'c':\n __impl__ = __IMPL_C__\n else:\n import warnings\n warnings.warn('Implementation ' + impl +\n ' is not known. Using the fallback python implementation.')\n __impl__ = __IMPL_PYTHON__\n",
"role": "user"
},
{
"content": "set_implementation",
"role": "assistant"
}
] |
def x(A, pobs, pi, T=None, alpha_out=None):
if __impl__ == __IMPL_PYTHON__:
return ip.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=
config.dtype)
elif __impl__ == __IMPL_C__:
return ic.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=
config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: ' + str(
__impl__))
|
forward
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(A, pobs, pi, T=None, alpha_out=None):\n if __impl__ == __IMPL_PYTHON__:\n return ip.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=\n config.dtype)\n elif __impl__ == __IMPL_C__:\n return ic.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=\n config.dtype)\n else:\n raise RuntimeError('Nonexisting implementation selected: ' + str(\n __impl__))\n",
"role": "user"
},
{
"content": "forward",
"role": "assistant"
}
] |
def x(A, pobs, T=None, beta_out=None):
if __impl__ == __IMPL_PYTHON__:
return ip.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype)
elif __impl__ == __IMPL_C__:
return ic.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: ' + str(
__impl__))
|
backward
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(A, pobs, T=None, beta_out=None):\n if __impl__ == __IMPL_PYTHON__:\n return ip.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype)\n elif __impl__ == __IMPL_C__:\n return ic.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype)\n else:\n raise RuntimeError('Nonexisting implementation selected: ' + str(\n __impl__))\n",
"role": "user"
},
{
"content": "backward",
"role": "assistant"
}
] |
def x(alpha, beta, T=None, gamma_out=None):
global ones_size
if ones_size != alpha.shape[1]:
global ones
ones = np.ones(alpha.shape[1])[:, None]
ones_size = alpha.shape[1]
if alpha.shape[0] != beta.shape[0]:
raise ValueError('Inconsistent sizes of alpha and beta.')
if T is None:
if gamma_out is None:
T = alpha.shape[0]
else:
T = gamma_out.shape[0]
if gamma_out is None:
gamma_out = alpha * beta
if T < gamma_out.shape[0]:
gamma_out = gamma_out[:T]
elif gamma_out.shape[0] < alpha.shape[0]:
np.multiply(alpha[:T], beta[:T], gamma_out)
else:
np.multiply(alpha, beta, gamma_out)
np.divide(gamma_out, np.dot(gamma_out, ones), out=gamma_out)
return gamma_out
|
state_probabilities
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alpha, beta, T=None, gamma_out=None):\n global ones_size\n if ones_size != alpha.shape[1]:\n global ones\n ones = np.ones(alpha.shape[1])[:, None]\n ones_size = alpha.shape[1]\n if alpha.shape[0] != beta.shape[0]:\n raise ValueError('Inconsistent sizes of alpha and beta.')\n if T is None:\n if gamma_out is None:\n T = alpha.shape[0]\n else:\n T = gamma_out.shape[0]\n if gamma_out is None:\n gamma_out = alpha * beta\n if T < gamma_out.shape[0]:\n gamma_out = gamma_out[:T]\n elif gamma_out.shape[0] < alpha.shape[0]:\n np.multiply(alpha[:T], beta[:T], gamma_out)\n else:\n np.multiply(alpha, beta, gamma_out)\n np.divide(gamma_out, np.dot(gamma_out, ones), out=gamma_out)\n return gamma_out\n",
"role": "user"
},
{
"content": "state_probabilities",
"role": "assistant"
}
] |
def x(gamma, T, out=None):
return np.sum(gamma[0:T], axis=0, out=out)
|
state_counts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(gamma, T, out=None):\n return np.sum(gamma[0:T], axis=0, out=out)\n",
"role": "user"
},
{
"content": "state_counts",
"role": "assistant"
}
] |
def x(alpha, beta, A, pobs, T=None, out=None):
if __impl__ == __IMPL_PYTHON__:
return ip.transition_counts(alpha, beta, A, pobs, T=T, out=out,
dtype=config.dtype)
elif __impl__ == __IMPL_C__:
return ic.transition_counts(alpha, beta, A, pobs, T=T, out=out,
dtype=config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: ' + str(
__impl__))
|
transition_counts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alpha, beta, A, pobs, T=None, out=None):\n if __impl__ == __IMPL_PYTHON__:\n return ip.transition_counts(alpha, beta, A, pobs, T=T, out=out,\n dtype=config.dtype)\n elif __impl__ == __IMPL_C__:\n return ic.transition_counts(alpha, beta, A, pobs, T=T, out=out,\n dtype=config.dtype)\n else:\n raise RuntimeError('Nonexisting implementation selected: ' + str(\n __impl__))\n",
"role": "user"
},
{
"content": "transition_counts",
"role": "assistant"
}
] |
def x(A, pobs, pi):
if __impl__ == __IMPL_PYTHON__:
return ip.viterbi(A, pobs, pi, dtype=config.dtype)
elif __impl__ == __IMPL_C__:
return ic.viterbi(A, pobs, pi, dtype=config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: ' + str(
__impl__))
|
viterbi
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(A, pobs, pi):\n if __impl__ == __IMPL_PYTHON__:\n return ip.viterbi(A, pobs, pi, dtype=config.dtype)\n elif __impl__ == __IMPL_C__:\n return ic.viterbi(A, pobs, pi, dtype=config.dtype)\n else:\n raise RuntimeError('Nonexisting implementation selected: ' + str(\n __impl__))\n",
"role": "user"
},
{
"content": "viterbi",
"role": "assistant"
}
] |
def x(alpha, A, pobs, T=None):
if __impl__ == __IMPL_PYTHON__:
return ip.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)
elif __impl__ == __IMPL_C__:
return ic.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: ' + str(
__impl__))
|
sample_path
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alpha, A, pobs, T=None):\n if __impl__ == __IMPL_PYTHON__:\n return ip.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)\n elif __impl__ == __IMPL_C__:\n return ic.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)\n else:\n raise RuntimeError('Nonexisting implementation selected: ' + str(\n __impl__))\n",
"role": "user"
},
{
"content": "sample_path",
"role": "assistant"
}
] |
def x(name='BHMM', pattern=
'%(asctime)s %(levelname)s %(name)s: %(message)s', date_format=
'%H:%M:%S', handler=logging.StreamHandler(sys.stdout)):
_logger = logging.getLogger(name)
_logger.setLevel(config.log_level())
if not _logger.handlers:
formatter = logging.Formatter(pattern, date_format)
handler.setFormatter(formatter)
handler.setLevel(config.log_level())
_logger.addHandler(handler)
_logger.propagate = False
return _logger
|
logger
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(name='BHMM', pattern=\n '%(asctime)s %(levelname)s %(name)s: %(message)s', date_format=\n '%H:%M:%S', handler=logging.StreamHandler(sys.stdout)):\n _logger = logging.getLogger(name)\n _logger.setLevel(config.log_level())\n if not _logger.handlers:\n formatter = logging.Formatter(pattern, date_format)\n handler.setFormatter(formatter)\n handler.setLevel(config.log_level())\n _logger.addHandler(handler)\n _logger.propagate = False\n return _logger\n",
"role": "user"
},
{
"content": "logger",
"role": "assistant"
}
] |
def x(self, nsamples, nburn=0, nthin=1, save_hidden_state_trajectory=False,
call_back=None):
for iteration in range(nburn):
logger().info('Burn-in %8d / %8d' % (iteration, nburn))
self._update()
models = list()
for iteration in range(nsamples):
logger().info('Iteration %8d / %8d' % (iteration, nsamples))
for thin in range(nthin):
self._update()
model_copy = copy.deepcopy(self.model)
if not save_hidden_state_trajectory:
model_copy.hidden_state_trajectory = None
models.append(model_copy)
if call_back is not None:
call_back()
return models
|
sample
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, nsamples, nburn=0, nthin=1, save_hidden_state_trajectory=False,\n call_back=None):\n for iteration in range(nburn):\n logger().info('Burn-in %8d / %8d' % (iteration, nburn))\n self._update()\n models = list()\n for iteration in range(nsamples):\n logger().info('Iteration %8d / %8d' % (iteration, nsamples))\n for thin in range(nthin):\n self._update()\n model_copy = copy.deepcopy(self.model)\n if not save_hidden_state_trajectory:\n model_copy.hidden_state_trajectory = None\n models.append(model_copy)\n if call_back is not None:\n call_back()\n return models\n",
"role": "user"
},
{
"content": "sample",
"role": "assistant"
}
] |
def x(self):
initial_time = time.time()
self._updateHiddenStateTrajectories()
self._updateEmissionProbabilities()
self._updateTransitionMatrix()
final_time = time.time()
elapsed_time = final_time - initial_time
logger().info('BHMM update iteration took %.3f s' % elapsed_time)
|
update
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n initial_time = time.time()\n self._updateHiddenStateTrajectories()\n self._updateEmissionProbabilities()\n self._updateTransitionMatrix()\n final_time = time.time()\n elapsed_time = final_time - initial_time\n logger().info('BHMM update iteration took %.3f s' % elapsed_time)\n",
"role": "user"
},
{
"content": "update",
"role": "assistant"
}
] |
def x(self):
self.model.hidden_state_trajectories = list()
for trajectory_index in range(self.nobs):
hidden_state_trajectory = self._sampleHiddenStateTrajectory(self.
observations[trajectory_index])
self.model.hidden_state_trajectories.append(hidden_state_trajectory)
return
|
updateHiddenStateTrajectories
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.model.hidden_state_trajectories = list()\n for trajectory_index in range(self.nobs):\n hidden_state_trajectory = self._sampleHiddenStateTrajectory(self.\n observations[trajectory_index])\n self.model.hidden_state_trajectories.append(hidden_state_trajectory)\n return\n",
"role": "user"
},
{
"content": "updateHiddenStateTrajectories",
"role": "assistant"
}
] |
def x(self, obs, dtype=np.int32):
T = obs.shape[0]
A = self.model.transition_matrix
pi = self.model.initial_distribution
self.model.output_model.p_obs(obs, out=self.pobs)
logprob = hidden.forward(A, self.pobs, pi, T=T, alpha_out=self.alpha)[0]
S = hidden.sample_path(self.alpha, A, self.pobs, T=T)
return S
|
sampleHiddenStateTrajectory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, obs, dtype=np.int32):\n T = obs.shape[0]\n A = self.model.transition_matrix\n pi = self.model.initial_distribution\n self.model.output_model.p_obs(obs, out=self.pobs)\n logprob = hidden.forward(A, self.pobs, pi, T=T, alpha_out=self.alpha)[0]\n S = hidden.sample_path(self.alpha, A, self.pobs, T=T)\n return S\n",
"role": "user"
},
{
"content": "sampleHiddenStateTrajectory",
"role": "assistant"
}
] |
def x(self):
observations_by_state = [self.model.collect_observations_in_state(self.
observations, state) for state in range(self.model.nstates)]
self.model.output_model.sample(observations_by_state)
|
updateEmissionProbabilities
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n observations_by_state = [self.model.collect_observations_in_state(self.\n observations, state) for state in range(self.model.nstates)]\n self.model.output_model.sample(observations_by_state)\n",
"role": "user"
},
{
"content": "updateEmissionProbabilities",
"role": "assistant"
}
] |
def x(self):
C = self.model.count_matrix() + self.prior_C
if self.reversible and not _tmatrix_disconnected.is_connected(C, strong
=True):
raise NotImplementedError(
"""Encountered disconnected count matrix with sampling option reversible:
"""
+ str(C) +
'\nUse prior to ensure connectivity or use reversible=False.')
P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=
10000, warn_not_converged=False)
zeros = np.where(P0 + P0.T == 0)
C[zeros] = 0
Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.
transition_matrix_sampling_steps, reversible=self.reversible)
if self.stationary:
p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C)
else:
n0 = self.model.count_init().astype(float)
first_timestep_counts_with_prior = n0 + self.prior_n0
positive = first_timestep_counts_with_prior > 0
p0 = np.zeros_like(n0)
p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior
[positive])
self.model.update(p0, Tij)
|
updateTransitionMatrix
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n C = self.model.count_matrix() + self.prior_C\n if self.reversible and not _tmatrix_disconnected.is_connected(C, strong\n =True):\n raise NotImplementedError(\n \"\"\"Encountered disconnected count matrix with sampling option reversible:\n \"\"\"\n + str(C) +\n '\\nUse prior to ensure connectivity or use reversible=False.')\n P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=\n 10000, warn_not_converged=False)\n zeros = np.where(P0 + P0.T == 0)\n C[zeros] = 0\n Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.\n transition_matrix_sampling_steps, reversible=self.reversible)\n if self.stationary:\n p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C)\n else:\n n0 = self.model.count_init().astype(float)\n first_timestep_counts_with_prior = n0 + self.prior_n0\n positive = first_timestep_counts_with_prior > 0\n p0 = np.zeros_like(n0)\n p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior\n [positive])\n self.model.update(p0, Tij)\n",
"role": "user"
},
{
"content": "updateTransitionMatrix",
"role": "assistant"
}
] |