| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class) |
|---|---|---|
def cubic_bucket_warp(x, n, l1, l2, l3, x0, w1, w2, w3):
"""Warps the length scale with a piecewise cubic "bucket" shape.
Parameters
----------
x : float or array-like of float
Locations to evaluate length scale at.
n : non-negative int
Derivative order to evaluate. Only first derivatives are supported.
l1 : positive float
Length scale to the left of the bucket.
l2 : positive float
Length scale in the bucket.
l3 : positive float
Length scale to the right of the bucket.
x0 : float
Location of the center of the bucket.
w1 : positive float
Width of the left side cubic section.
w2 : positive float
Width of the bucket.
w3 : positive float
Width of the right side cubic section.
"""
x1 = x0 - w2 / 2.0 - w1 / 2.0
x2 = x0 + w2 / 2.0 + w3 / 2.0
x_shift_1 = (x - x1 + w1 / 2.0) / w1
x_shift_2 = (x - x2 + w3 / 2.0) / w3
if n == 0:
return (
l1 * (x <= (x1 - w1 / 2.0)) + (
-2.0 * (l2 - l1) * (x_shift_1**3 - 3.0 / 2.0 * x_shift_1**2) + l1
) * ((x > (x1 - w1 / 2.0)) & (x < (x1 + w1 / 2.0))) +
l2 * ((x >= (x1 + w1 / 2.0)) & (x <= x2 - w3 / 2.0)) + (
-2.0 * (l3 - l2) * (x_shift_2**3 - 3.0 / 2.0 * x_shift_2**2) + l2
) * ((x > (x2 - w3 / 2.0)) & (x < (x2 + w3 / 2.0))) +
l3 * (x >= (x2 + w3 / 2.0))
)
elif n == 1:
return (
(
-2.0 * (l2 - l1) * (3 * x_shift_1**2 - 3.0 * x_shift_1) / w1
) * ((x > (x1 - w1 / 2.0)) & (x < (x1 + w1 / 2.0))) +
(
-2.0 * (l3 - l2) * (3 * x_shift_2**2 - 3.0 * x_shift_2) / w3
) * ((x > (x2 - w3 / 2.0)) & (x < (x2 + w3 / 2.0)))
)
else:
raise NotImplementedError("Only up to first derivatives are supported!")
|
Warps the length scale with a piecewise cubic "bucket" shape.
Parameters
----------
x : float or array-like of float
Locations to evaluate length scale at.
n : non-negative int
Derivative order to evaluate. Only first derivatives are supported.
l1 : positive float
Length scale to the left of the bucket.
l2 : positive float
Length scale in the bucket.
l3 : positive float
Length scale to the right of the bucket.
x0 : float
Location of the center of the bucket.
w1 : positive float
Width of the left side cubic section.
w2 : positive float
Width of the bucket.
w3 : positive float
Width of the right side cubic section.
|
entailment
|
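A quick sanity check for the cubic warp above (a sketch; it assumes numpy is importable and cubic_bucket_warp is in scope). Away from the breakpoints, central finite differences of the n=0 branch should agree with the analytic n=1 branch:

import numpy as np

x = np.linspace(-4.0, 4.0, 801)
args = (1.0, 0.25, 1.0, 0.0, 1.0, 2.0, 1.0)  # l1, l2, l3, x0, w1, w2, w3
l = cubic_bucket_warp(x, 0, *args)
dl = cubic_bucket_warp(x, 1, *args)
# Compare the analytic derivative against finite differences of the values:
print(np.abs(np.gradient(l, x) - dl).max())  # small (discretization error only)
|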
def quintic_bucket_warp(x, n, l1, l2, l3, x0, w1, w2, w3):
"""Warps the length scale with a piecewise quintic "bucket" shape.
Parameters
----------
x : float or array-like of float
Locations to evaluate length scale at.
n : non-negative int
Derivative order to evaluate. Only first derivatives are supported.
l1 : positive float
Length scale to the left of the bucket.
l2 : positive float
Length scale in the bucket.
l3 : positive float
Length scale to the right of the bucket.
x0 : float
Location of the center of the bucket.
w1 : positive float
Width of the left side quintic section.
w2 : positive float
Width of the bucket.
w3 : positive float
Width of the right side quintic section.
"""
x1 = x0 - w2 / 2.0 - w1 / 2.0
x2 = x0 + w2 / 2.0 + w3 / 2.0
x_shift_1 = 2.0 * (x - x1) / w1
x_shift_3 = 2.0 * (x - x2) / w3
if n == 0:
return (
l1 * (x <= (x1 - w1 / 2.0)) + (
0.5 * (l2 - l1) * (
3.0 / 8.0 * x_shift_1**5 -
5.0 / 4.0 * x_shift_1**3 +
15.0 / 8.0 * x_shift_1
) + (l1 + l2) / 2.0
) * ((x > (x1 - w1 / 2.0)) & (x < (x1 + w1 / 2.0))) +
l2 * ((x >= (x1 + w1 / 2.0)) & (x <= x2 - w3 / 2.0)) + (
0.5 * (l3 - l2) * (
3.0 / 8.0 * x_shift_3**5 -
5.0 / 4.0 * x_shift_3**3 +
15.0 / 8.0 * x_shift_3
) + (l2 + l3) / 2.0
) * ((x > (x2 - w3 / 2.0)) & (x < (x2 + w3 / 2.0))) +
l3 * (x >= (x2 + w3 / 2.0))
)
elif n == 1:
return (
(
0.5 * (l2 - l1) * (
5.0 * 3.0 / 8.0 * x_shift_1**4 -
3.0 * 5.0 / 4.0 * x_shift_1**2 +
15.0 / 8.0
) * 2.0 / w1  # chain rule: d(x_shift_1)/dx = 2/w1
) * ((x > (x1 - w1 / 2.0)) & (x < (x1 + w1 / 2.0))) + (
0.5 * (l3 - l2) * (
5.0 * 3.0 / 8.0 * x_shift_3**4 -
3.0 * 5.0 / 4.0 * x_shift_3**2 +
15.0 / 8.0
) * 2.0 / w3  # chain rule: d(x_shift_3)/dx = 2/w3
) * ((x > (x2 - w3 / 2.0)) & (x < (x2 + w3 / 2.0)))
)
else:
raise NotImplementedError("Only up to first derivatives are supported!")
|
Warps the length scale with a piecewise quintic "bucket" shape.
Parameters
----------
x : float or array-like of float
Locations to evaluate length scale at.
n : non-negative int
Derivative order to evaluate. Only first derivatives are supported.
l1 : positive float
Length scale to the left of the bucket.
l2 : positive float
Length scale in the bucket.
l3 : positive float
Length scale to the right of the bucket.
x0 : float
Location of the center of the bucket.
w1 : positive float
Width of the left side quintic section.
w2 : positive float
Width of the bucket.
w3 : positive float
Width of the right side quintic section.
|
entailment
|
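The quintic version trades a higher polynomial order for a smoother (C2 rather than C1) transition. A minimal check (assuming numpy and both warps above are in scope) that the plateau values l1, l2, l3 are reproduced:

import numpy as np

args = (1.0, 0.25, 1.0, 0.0, 1.0, 2.0, 1.0)  # l1, l2, l3, x0, w1, w2, w3
x = np.array([-4.0, 0.0, 4.0])  # left plateau, inside the bucket, right plateau
print(cubic_bucket_warp(x, 0, *args))    # -> [1.0, 0.25, 1.0]
print(quintic_bucket_warp(x, 0, *args))  # -> [1.0, 0.25, 1.0]
|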
def exp_gauss_warp(X, n, l0, *msb):
"""Length scale function which is an exponential of a sum of Gaussians.
The centers and widths of the Gaussians are free parameters.
The length scale function is given by
.. math::
l = l_0 \exp\left ( \sum_{i=1}^{N}\beta_i\exp\left ( -\frac{(x-\mu_i)^2}{2\sigma_i^2} \right ) \right )
The number of parameters is equal to three times the number of Gaussians
plus 1 (for :math:`l_0`). This function is inspired by what Gibbs used in
his PhD thesis.
Parameters
----------
X : 1d or 2d array of float
The points to evaluate the function at. If 2d, it should only have
one column (but this is not checked to save time).
n : int
The derivative order to compute. Used for all `X`.
l0 : float
The covariance length scale at the edges of the domain.
*msb : floats
Means, standard deviations and weights for each Gaussian, in that order.
"""
X = scipy.asarray(X, dtype=float)
msb = scipy.asarray(msb, dtype=float)
n_gauss = len(msb) // 3  # integer division (len(msb) / 3 breaks on Python 3)
mm = msb[:n_gauss]
ss = msb[n_gauss:2 * n_gauss]
bb = msb[2 * n_gauss:]
# This is done with for-loops, because trying to get fancy with
# broadcasting was being too memory-intensive for some reason.
if n == 0:
l = scipy.zeros_like(X)
for m, s, b in zip(mm, ss, bb):
l += b * scipy.exp(-(X - m)**2.0 / (2.0 * s**2.0))
l = l0 * scipy.exp(l)
return l
elif n == 1:
l1 = scipy.zeros_like(X)
l2 = scipy.zeros_like(X)
for m, s, b in zip(mm, ss, bb):
term = b * scipy.exp(-(X - m)**2.0 / (2.0 * s**2.0))
l1 += term
l2 += term * (X - m) / s**2.0
l = -l0 * scipy.exp(l1) * l2
return l
else:
raise NotImplementedError("Only n <= 1 is supported!")
|
Length scale function which is an exponential of a sum of Gaussians.
The centers and widths of the Gaussians are free parameters.
The length scale function is given by
.. math::
l = l_0 \exp\left ( \sum_{i=1}^{N}\beta_i\exp\left ( -\frac{(x-\mu_i)^2}{2\sigma_i^2} \right ) \right )
The number of parameters is equal to three times the number of Gaussians
plus 1 (for :math:`l_0`). This function is inspired by what Gibbs used in
his PhD thesis.
Parameters
----------
X : 1d or 2d array of float
The points to evaluate the function at. If 2d, it should only have
one column (but this is not checked to save time).
n : int
The derivative order to compute. Used for all `X`.
l0 : float
The covariance length scale at the edges of the domain.
*msb : floats
Means, standard deviations and weights for each Gaussian, in that order.
|
entailment
|
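A small usage sketch (it assumes the legacy scipy aliases such as scipy.exp still resolve; on current stacks the same names live in numpy). A single Gaussian with weight -1 carves a dip into an otherwise constant length scale:

# mu=0.0, sigma=0.5, beta=-1.0 for one Gaussian dip:
l = exp_gauss_warp([-3.0, 0.0, 3.0], 0, 1.0, 0.0, 0.5, -1.0)
print(l)  # ~[1.0, exp(-1) ~= 0.37, 1.0]
|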
def get_multiple_choices_required(self):
"""
Add only the required message, but no 'ng-required' attribute to the input fields,
otherwise all Checkboxes of a MultipleChoiceField would require the property "checked".
"""
errors = []
if self.required:
for key, msg in self.error_messages.items():
if key == 'required':
errors.append(('$error.required', msg))
return errors
|
Add only the required message, but no 'ng-required' attribute to the input fields,
otherwise all Checkboxes of a MultipleChoiceField would require the property "checked".
|
entailment
|
def sso(user, desired_username, name, email, profile_fields=None):
"""
Create a user, if the provided `user` is None, from the parameters.
Then log the user in, and return it.
"""
if not user:
if not settings.REGISTRATION_OPEN:
raise SSOError('Account registration is closed')
user = _create_desired_user(desired_username)
_configure_user(user, name, email, profile_fields)
if not user.is_active:
raise SSOError('Account disabled')
# login() expects the logging in backend to be set on the user.
# We are bypassing login, so fake it.
user.backend = settings.AUTHENTICATION_BACKENDS[0]
return user
|
Create a user, if the provided `user` is None, from the parameters.
Then log the user in, and return it.
|
entailment
|
def parallel_compute_ll_matrix(gp, bounds, num_pts, num_proc=None):
"""Compute matrix of the log likelihood over the parameter space in parallel.
Parameters
----------
bounds : 2-tuple or list of 2-tuples with length equal to the number of free parameters
Bounds on the range to use for each of the parameters. If a single
2-tuple is given, it will be used for each of the parameters.
num_pts : int or list of ints with length equal to the number of free parameters
The number of points to use for each parameters. If a single int is
given, it will be used for each of the parameters.
num_proc : Positive int or None, optional
Number of processes to run the parallel computation with. If set to
None, ALL available cores are used. Default is None (use all available
cores).
Returns
-------
ll_vals : array
The log likelihood for each of the parameter possibilities.
param_vals : list of array
The parameter values used.
"""
if num_proc is None:
num_proc = multiprocessing.cpu_count()
present_free_params = gp.free_params
bounds = scipy.atleast_2d(scipy.asarray(bounds, dtype=float))
if bounds.shape[1] != 2:
raise ValueError("Argument bounds must have shape (n, 2)!")
# If bounds is a single tuple, repeat it for each free parameter:
if bounds.shape[0] == 1:
bounds = scipy.tile(bounds, (len(present_free_params), 1))
# If num_pts is a single value, use it for all of the parameters:
try:
iter(num_pts)
except TypeError:
num_pts = num_pts * scipy.ones(bounds.shape[0], dtype=int)
else:
num_pts = scipy.asarray(num_pts, dtype=int)
if len(num_pts) != len(present_free_params):
raise ValueError("Length of num_pts must match the number of free parameters of kernel!")
# Form arrays to evaluate parameters over:
param_vals = []
for k in xrange(0, len(present_free_params)):
param_vals.append(scipy.linspace(bounds[k, 0], bounds[k, 1], num_pts[k]))
pv_cases = list()
gp_cases = list()
num_pts_cases = list()
for k in xrange(0, len(param_vals[0])):
specific_param_vals = list(param_vals)
specific_param_vals[0] = param_vals[0][k]
pv_cases.append(specific_param_vals)
gp_cases += [copy.deepcopy(gp)]
num_pts_cases.append(num_pts)
pool = multiprocessing.Pool(processes=num_proc)
try:
vals = scipy.asarray(
pool.map(
_compute_ll_matrix_wrapper,
zip(gp_cases, pv_cases, num_pts_cases)
)
)
finally:
pool.close()
return (vals, param_vals)
|
Compute matrix of the log likelihood over the parameter space in parallel.
Parameters
----------
bounds : 2-tuple or list of 2-tuples with length equal to the number of free parameters
Bounds on the range to use for each of the parameters. If a single
2-tuple is given, it will be used for each of the parameters.
num_pts : int or list of ints with length equal to the number of free parameters
The number of points to use for each parameters. If a single int is
given, it will be used for each of the parameters.
num_proc : Positive int or None, optional
Number of processes to run the parallel computation with. If set to
None, ALL available cores are used. Default is None (use all available
cores).
Returns
-------
ll_vals : array
The log likelihood for each of the parameter possibilities.
param_vals : list of array
The parameter values used.
|
entailment
|
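A usage sketch for the brute-force grid evaluation (hypothetical: gp stands for a fitted gptools-style GaussianProcess with two free hyperparameters; the bounds and grid size are illustrative):

# Hypothetical gp with two free hyperparameters:
ll_vals, param_vals = parallel_compute_ll_matrix(
    gp,
    [(0.1, 10.0), (0.01, 1.0)],  # one (low, high) pair per free parameter
    20,                          # 20 grid points along each parameter
    num_proc=4,
)
# ll_vals has shape (20, 20); param_vals holds the two grid axes.
|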
def slice_plot(*args, **kwargs):
"""Constructs a plot that lets you look at slices through a multidimensional array.
Parameters
----------
vals : array, (`M`, `D`, `P`, ...)
Multidimensional array to visualize.
x_vals_1 : array, (`M`,)
Values along the first dimension.
x_vals_2 : array, (`D`,)
Values along the second dimension.
x_vals_3 : array, (`P`,)
Values along the third dimension.
**...and so on. At least four arguments must be provided.**
names : list of strings, optional
Names for each of the parameters at hand. If None, sequential numerical
identifiers will be used. Length must be equal to the number of
dimensions of `vals`. Default is None.
n : Positive int, optional
Number of contours to plot. Default is 100.
Returns
-------
f : :py:class:`Figure`
The Matplotlib figure instance created.
Raises
------
GPArgumentError
If the number of arguments is less than 4.
"""
names = kwargs.get('names', None)
n = kwargs.get('n', 100)
num_axes = len(args) - 1
if num_axes < 3:
raise GPArgumentError("Must pass at least four arguments to slice_plot!")
if num_axes != args[0].ndim:
raise GPArgumentError("Number of dimensions of the first argument "
"must match the number of additional arguments "
"provided!")
if names is None:
names = [str(k) for k in range(0, num_axes)]  # one identifier per dimension
f = plt.figure()
height_ratios = [8]
height_ratios += (num_axes - 2) * [1]
gs = mplgs.GridSpec(num_axes - 2 + 1, 2, height_ratios=height_ratios, width_ratios=[8, 1])
a_main = f.add_subplot(gs[0, 0])
a_cbar = f.add_subplot(gs[0, 1])
a_sliders = []
for idx in xrange(0, num_axes - 2):
a_sliders.append(f.add_subplot(gs[idx+1, :]))
title = f.suptitle("")
def update(val):
"""Update the slice shown.
"""
a_main.clear()
a_cbar.clear()
idxs = [int(slider.val) for slider in sliders]
vals = [args[k + 3][idxs[k]] for k in range(0, num_axes - 2)]
descriptions = tuple(itertools.chain.from_iterable(itertools.izip(names[2:], vals)))
fmt = "Slice" + (num_axes - 2) * ", %s: %f"
title.set_text(fmt % descriptions)
a_main.set_xlabel(names[1])
a_main.set_ylabel(names[0])
cs = a_main.contour(
args[2],
args[1],
args[0][scipy.s_[:, :] + tuple(idxs)].squeeze(),
n,
vmin=args[0].min(),
vmax=args[0].max()  # scale color limits to the full data array
)
cbar = f.colorbar(cs, cax=a_cbar)
cbar.set_label("LL")
f.canvas.draw()
idxs_0 = (num_axes - 2) * [0]
sliders = []
for idx in xrange(0, num_axes - 2):
sliders.append(
mplw.Slider(
a_sliders[idx],
'%s index' % names[idx + 2],
0,
len(args[idx + 3]) - 1,
valinit=idxs_0[idx],
valfmt='%d'
)
)
sliders[-1].on_changed(update)
update(idxs_0)
f.canvas.mpl_connect('key_press_event', lambda evt: arrow_respond(sliders[0], evt))
return f
|
Constructs a plot that lets you look at slices through a multidimensional array.
Parameters
----------
vals : array, (`M`, `D`, `P`, ...)
Multidimensional array to visualize.
x_vals_1 : array, (`M`,)
Values along the first dimension.
x_vals_2 : array, (`D`,)
Values along the second dimension.
x_vals_3 : array, (`P`,)
Values along the third dimension.
**...and so on. At least four arguments must be provided.**
names : list of strings, optional
Names for each of the parameters at hand. If None, sequential numerical
identifiers will be used. Length must be equal to the number of
dimensions of `vals`. Default is None.
n : Positive int, optional
Number of contours to plot. Default is 100.
Returns
-------
f : :py:class:`Figure`
The Matplotlib figure instance created.
Raises
------
GPArgumentError
If the number of arguments is less than 4.
|
entailment
|
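An illustrative call (the array and axes are made up; matplotlib must be set up for interactive use): the first two dimensions are contoured and the third gets a slider.

import numpy as np

x1 = np.linspace(0.0, 1.0, 30)
x2 = np.linspace(-1.0, 1.0, 40)
x3 = np.linspace(0.0, 5.0, 10)
vals = np.sin(2 * np.pi * x1)[:, None, None] * x2[None, :, None] + x3[None, None, :]
f = slice_plot(vals, x1, x2, x3, names=['x1', 'x2', 'x3'], n=50)
|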
def arrow_respond(slider, event):
"""Event handler for arrow key events in plot windows.
Pass the slider object to update as a masked argument using a lambda function::
lambda evt: arrow_respond(my_slider, evt)
Parameters
----------
slider : Slider instance associated with this handler.
event : Event to be handled.
"""
if event.key == 'right':
slider.set_val(min(slider.val + 1, slider.valmax))
elif event.key == 'left':
slider.set_val(max(slider.val - 1, slider.valmin))
|
Event handler for arrow key events in plot windows.
Pass the slider object to update as a masked argument using a lambda function::
lambda evt: arrow_respond(my_slider, evt)
Parameters
----------
slider : Slider instance associated with this handler.
event : Event to be handled.
|
entailment
|
def debit(self, amount, credit_account, description, debit_memo="", credit_memo="", datetime=None):
""" Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative.
"""
assert amount >= 0
return self.post(amount, credit_account, description, self_memo=debit_memo, other_memo=credit_memo, datetime=datetime)
|
Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative.
|
entailment
|
def credit(self, amount, debit_account, description, debit_memo="", credit_memo="", datetime=None):
""" Post a credit of 'amount' and a debit of -amount against this account and credit_account respectively.
note amount must be non-negative.
"""
assert amount >= 0
return self.post(-amount, debit_account, description, self_memo=credit_memo, other_memo=debit_memo, datetime=datetime)
|
Post a credit of 'amount' and a debit of -amount against this account and debit_account respectively.
note amount must be non-negative.
|
entailment
|
def post(self, amount, other_account, description, self_memo="", other_memo="", datetime=None):
""" Post a transaction of 'amount' against this account and the negative amount against 'other_account'.
This will show as a debit or credit against this account when amount > 0 or amount < 0 respectively.
"""
#Note: debits are always positive, credits are always negative. They should be negated before displaying
#(expense and liability?) accounts
tx = self._new_transaction()
if datetime:
tx.t_stamp = datetime
#else now()
tx.description = description
tx.save()
a1 = self._make_ae(self._DEBIT_IN_DB() * amount, self_memo, tx)
a1.save()
a2 = other_account._make_ae(-self._DEBIT_IN_DB() * amount, other_memo, tx)
a2.save()
return (a1, a2)
|
Post a transaction of 'amount' against this account and the negative amount against 'other_account'.
This will show as a debit or credit against this account when amount > 0 or amount < 0 respectively.
|
entailment
|
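A hypothetical double-entry flow built on the three methods above (the Account lookups, names and amounts are illustrative, not from the source):

from decimal import Decimal

# Hypothetical accounts from the same book set:
supplies = Account.objects.get(name='office supplies')
cash = Account.objects.get(name='cash')
# Debit supplies and credit cash; post() writes the two balancing entries:
entry_pair = supplies.debit(Decimal('45.00'), cash, 'printer paper')
# The paired entries carry +amount and -amount, so the books stay balanced.
|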
def balance(self, date=None):
""" returns the account balance as of 'date' (datetime stamp) or now(). """
qs = self._entries()
if date:
qs = qs.filter(transaction__t_stamp__lt=date)
r = qs.aggregate(b=Sum('amount'))
b = r['b']
flip = self._DEBIT_IN_DB()
if self._positive_credit():
flip *= -1
if b is None:
b = Decimal("0.00")
b *= flip
#print "returning balance %s for %s" % (b, self)
return b
|
returns the account balance as of 'date' (datetime stamp) or now().
|
entailment
|
def totals(self, start=None, end=None):
"""Returns a Totals object containing the sum of all debits, credits
and net change over the period of time from start to end.
'start' is inclusive, 'end' is exclusive
"""
qs = self._entries_range(start=start, end=end)
qs_positive = qs.filter(amount__gt=Decimal("0.00")).all().aggregate(Sum('amount'))
qs_negative = qs.filter(amount__lt=Decimal("0.00")).all().aggregate(Sum('amount'))
#Is there a cleaner way of saying this? Should the sum of 0 things be None?
positives = qs_positive['amount__sum'] if qs_positive['amount__sum'] is not None else 0
negatives = -qs_negative['amount__sum'] if qs_negative['amount__sum'] is not None else 0
if self._DEBIT_IN_DB() > 0:
debits = positives
credits = negatives
else:
debits = negatives
credits = positives
net = debits-credits
if self._positive_credit():
net = -net
return self.Totals(credits, debits, net)
|
Returns a Totals object containing the sum of all debits, credits
and net change over the period of time from start to end.
'start' is inclusive, 'end' is exclusive
|
entailment
|
def ledger(self, start=None, end=None):
"""Returns a list of entries for this account.
Ledger returns a sequence of LedgerEntry's matching the criteria
in chronological order. The returned sequence can be boolean-tested
(ie. test that nothing was returned).
If 'start' is given, only entries on or after that datetime are
returned. 'start' must be given with a timezone.
If 'end' is given, only entries before that datetime are
returned. 'end' must be given with a timezone.
"""
DEBIT_IN_DB = self._DEBIT_IN_DB()
flip = 1
if self._positive_credit():
flip *= -1
qs = self._entries_range(start=start, end=end)
qs = qs.order_by("transaction__t_stamp", "transaction__tid")
balance = Decimal("0.00")
if start:
balance = self.balance(start)
if not qs:
return []
#helper is a hack so the caller can test for no entries.
def helper(balance_in):
balance = balance_in
for e in qs.all():
amount = e.amount * DEBIT_IN_DB
o_balance = balance
balance += flip * amount
yield LedgerEntry(amount, e, o_balance, balance)
return helper(balance)
|
Returns a list of entries for this account.
Ledger returns a sequence of LedgerEntry's matching the criteria
in chronological order. The returned sequence can be boolean-tested
(i.e., test that nothing was returned).
If 'start' is given, only entries on or after that datetime are
returned. 'start' must be given with a timezone.
If 'end' is given, only entries before that datetime are
returned. 'end' must be given with a timezone.
|
entailment
|
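A sketch of walking a ledger (the account object, the timezone-aware datetimes jan_1 and feb_1, and the idea of printing each entry are assumptions for illustration):

# Hypothetical: entries for January, with running balances.
entries = account.ledger(start=jan_1, end=feb_1)  # tz-aware datetimes
for entry in entries:
    # Each LedgerEntry carries the signed amount plus the balance before
    # and after the entry, as assembled in the helper generator above.
    print(entry)
|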
def get_third_party(self, third_party):
"""Return the account for the given third-party. Raise <something> if the third party doesn't belong to this bookset."""
actual_account = third_party.get_account()
assert actual_account.get_bookset() == self
return ThirdPartySubAccount(actual_account, third_party=third_party)
|
Return the account for the given third-party. Raise <something> if the third party doesn't belong to this bookset.
|
entailment
|
def get_third_party(self, third_party):
"""Return the account for the given third-party. Raise <something> if the third party doesn't belong to this bookset."""
actual_account = third_party.get_account()
assert actual_account.get_bookset() == self.get_bookset()
return ProjectAccount(actual_account, project=self, third_party=third_party)
|
Return the account for the given third-party. Raise <something> if the third party doesn't belong to this bookset.
|
entailment
|
def find_overlapping_slots(all_slots):
"""Find any slots that overlap"""
overlaps = set([])
for slot in all_slots:
# Because slots are ordered, we can be more efficient than this
# N^2 loop, but this is simple and, since the number of slots
# should be low, this should be "fast enough"
start = slot.get_start_time()
end = slot.end_time
for other_slot in all_slots:
if other_slot.pk == slot.pk:
continue
if other_slot.get_day() != slot.get_day():
# different days, can't overlap
continue
# Overlap if the start_time or end_time is bounded by our times
# start_time <= other.start_time < end_time
# or
# start_time < other.end_time <= end_time
other_start = other_slot.get_start_time()
other_end = other_slot.end_time
if start <= other_start and other_start < end:
overlaps.add(slot)
overlaps.add(other_slot)
elif start < other_end and other_end <= end:
overlaps.add(slot)
overlaps.add(other_slot)
return overlaps
|
Find any slots that overlap
|
entailment
|
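The half-open comparison in the inner loop can be read as a standalone predicate; a minimal sketch with plain numbers, showing that back-to-back slots do not count as overlapping:

def overlaps(start, end, other_start, other_end):
    # Overlap when a boundary of the other slot falls inside [start, end);
    # shared endpoints (end == other_start) are not overlaps.
    return (start <= other_start < end) or (start < other_end <= end)

print(overlaps(9, 10, 10, 11))  # False: adjacent slots merely touch
print(overlaps(9, 11, 10, 12))  # True: one hour in common
|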
def find_non_contiguous(all_items):
"""Find any items that have slots that aren't contiguous"""
non_contiguous = []
for item in all_items:
if item.slots.count() < 2:
# No point in checking
continue
last_slot = None
for slot in item.slots.all().order_by('end_time'):
if last_slot:
if last_slot.end_time != slot.get_start_time():
non_contiguous.append(item)
break
last_slot = slot
return non_contiguous
|
Find any items that have slots that aren't contiguous
|
entailment
|
def validate_items(all_items):
"""Find errors in the schedule. Check for:
- pending / rejected talks in the schedule
- items with both talks and pages assigned
- items with neither talks nor pages assigned
"""
validation = []
for item in all_items:
if item.talk is not None and item.page is not None:
validation.append(item)
elif item.talk is None and item.page is None:
validation.append(item)
elif item.talk and item.talk.status not in [ACCEPTED, CANCELLED]:
validation.append(item)
return validation
|
Find errors in the schedule. Check for:
- pending / rejected talks in the schedule
- items with both talks and pages assigned
- items with neither talks nor pages assigned
|
entailment
|
def find_duplicate_schedule_items(all_items):
"""Find talks / pages assigned to mulitple schedule items"""
duplicates = []
seen_talks = {}
for item in all_items:
if item.talk and item.talk in seen_talks:
duplicates.append(item)
if seen_talks[item.talk] not in duplicates:
duplicates.append(seen_talks[item.talk])
else:
seen_talks[item.talk] = item
# We currently allow duplicate pages for cases were we need disjoint
# schedule items, like multiple open space sessions on different
# days and similar cases. This may be revisited later
return duplicates
|
Find talks / pages assigned to multiple schedule items
|
entailment
|
def find_clashes(all_items):
"""Find schedule items which clash (common slot and venue)"""
clashes = {}
seen_venue_slots = {}
for item in all_items:
for slot in item.slots.all():
pos = (item.venue, slot)
if pos in seen_venue_slots:
if seen_venue_slots[pos] not in clashes:
clashes[pos] = [seen_venue_slots[pos]]
clashes[pos].append(item)
else:
seen_venue_slots[pos] = item
# We return a list, to match other validators
return clashes.items()
|
Find schedule items which clash (common slot and venue)
|
entailment
|
def find_invalid_venues(all_items):
"""Find venues assigned slots that aren't on the allowed list
of days."""
venues = {}
for item in all_items:
valid = False
item_days = list(item.venue.days.all())
for slot in item.slots.all():
for day in item_days:
if day == slot.get_day():
valid = True
break
if not valid:
venues.setdefault(item.venue, [])
venues[item.venue].append(item)
return venues.items()
|
Find venues assigned slots that aren't on the allowed list
of days.
|
entailment
|
def check_schedule():
"""Helper routine to easily test if the schedule is valid"""
all_items = prefetch_schedule_items()
for validator, _type, _msg in SCHEDULE_ITEM_VALIDATORS:
if validator(all_items):
return False
all_slots = prefetch_slots()
for validator, _type, _msg in SLOT_VALIDATORS:
if validator(all_slots):
return False
return True
|
Helper routine to easily test if the schedule is valid
|
entailment
|
def validate_schedule():
"""Helper routine to report issues with the schedule"""
all_items = prefetch_schedule_items()
errors = []
for validator, _type, msg in SCHEDULE_ITEM_VALIDATORS:
if validator(all_items):
errors.append(msg)
all_slots = prefetch_slots()
for validator, _type, msg in SLOT_VALIDATORS:
if validator(all_slots):
errors.append(msg)
return errors
|
Helper routine to report issues with the schedule
|
entailment
|
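Both helpers unpack (validator, type, message) triples from two module-level tables that are not shown in this excerpt. A presumed sketch of their structure, built from the validators above (the exact entries and messages are assumptions):

# Presumed structure of the validator tables (entries are illustrative):
SCHEDULE_ITEM_VALIDATORS = [
    (validate_items, 'error', 'Broken schedule items found'),
    (find_duplicate_schedule_items, 'error', 'Duplicate schedule items found'),
    (find_clashes, 'error', 'Clashing schedule items found'),
    (find_invalid_venues, 'error', 'Venues scheduled on disallowed days'),
]
SLOT_VALIDATORS = [
    (find_overlapping_slots, 'error', 'Overlapping slots found'),
]
|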
def get_form(self, request, obj=None, **kwargs):
"""Change the form depending on whether we're adding or
editing the slot."""
if obj is None:
# Adding a new Slot
kwargs['form'] = SlotAdminAddForm
return super(SlotAdmin, self).get_form(request, obj, **kwargs)
|
Change the form depending on whether we're adding or
editing the slot.
|
entailment
|
def get_cached_menus():
"""Return the menus from the cache or generate them if needed."""
items = cache.get(CACHE_KEY)
if items is None:
menu = generate_menu()
cache.set(CACHE_KEY, menu.items)
else:
menu = Menu(items)
return menu
|
Return the menus from the cache or generate them if needed.
|
entailment
|
def maybe_obj(str_or_obj):
"""If argument is not a string, return it.
Otherwise import the dotted name and return that.
"""
if not isinstance(str_or_obj, six.string_types):
return str_or_obj
parts = str_or_obj.split(".")
mod, modname = None, None
for p in parts:
modname = p if modname is None else "%s.%s" % (modname, p)
try:
mod = __import__(modname)
except ImportError:
if mod is None:
raise
break
obj = mod
for p in parts[1:]:
obj = getattr(obj, p)
return obj
|
If argument is not a string, return it.
Otherwise import the dotted name and return that.
|
entailment
|
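Two quick calls showing both branches of maybe_obj (a sketch; assumes the function and its six dependency are importable):

join = maybe_obj('os.path.join')  # dotted string: imported and resolved
print(join('a', 'b'))             # -> 'a/b' (or 'a\\b' on Windows)
print(maybe_obj(len) is len)      # -> True: non-strings pass through untouched
|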
def generate_menu():
"""Generate a new list of menus."""
root_menu = Menu(list(copy.deepcopy(settings.WAFER_MENUS)))
for dynamic_menu_func in settings.WAFER_DYNAMIC_MENUS:
dynamic_menu_func = maybe_obj(dynamic_menu_func)
dynamic_menu_func(root_menu)
return root_menu
|
Generate a new list of menus.
|
entailment
|
def lock(self):
'''
Try to lock the file
- the function will wait until the file is unlocked if 'wait' was defined as locktype
- the function will raise an AlreadyLocked exception if 'lock' was defined as locktype
'''
# Open file
self.__fd = open(self.__lockfile, "w")
# Get it locked
if self.__locktype == "wait":
# Try to get it locked until ready
fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX)
elif self.__locktype == "lock":
# Try to get the locker if can not raise an exception
try:
fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
except IOError:
raise AlreadyLocked("File is already locked")
|
Try to lock the file
- the function will wait until the file is unlocked if 'wait' was defined as locktype
- the function will raise an AlreadyLocked exception if 'lock' was defined as locktype
|
entailment
|
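The non-blocking probe behind the 'lock' locktype, reduced to plain fcntl calls (a Unix-only sketch; the lock-file path is made up). A second exclusive request on an already-locked file raises IOError (an OSError subclass on Python 3):

import fcntl

fd = open('/tmp/demo.lock', 'w')
fcntl.flock(fd.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)  # first lock succeeds
fd2 = open('/tmp/demo.lock', 'w')
try:
    fcntl.flock(fd2.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    print('already locked')
|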
def _make_handler(state_token, done_function):
'''
Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
'''
class LocalServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def error_response(self, msg):
logging.warning(
'Error response: %(msg)s. %(path)s',
{'msg': msg, 'path': self.path})
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(msg)
def do_GET(self):
parsed = urlparse.urlparse(self.path)
if len(parsed.query) == 0 or parsed.path != '/callback':
self.error_response(
'We encountered a problem with your request.')
return
params = urlparse.parse_qs(parsed.query)
if params['state'] != [state_token]:
self.error_response(
'Attack detected: state tokens did not match!')
return
if len(params['code']) != 1:
self.error_response('Wrong number of "code" query parameters.')
return
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(
"courseraoauth2client: we have captured Coursera's response "
"code. Feel free to close this browser window now and return "
"to your terminal. Thanks!")
done_function(params['code'][0])
return LocalServerHandler
|
Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
|
entailment
|
def configuration():
'Loads configuration from the file system.'
defaults = '''
[oauth2]
hostname = localhost
port = 9876
api_endpoint = https://api.coursera.org
auth_endpoint = https://accounts.coursera.org/oauth2/v1/auth
token_endpoint = https://accounts.coursera.org/oauth2/v1/token
verify_tls = True
token_cache_base = ~/.coursera
[manage_graders]
client_id = NS8qaSX18X_Eu0pyNbLsnA
client_secret = bUqKqGywnGXEJPFrcd4Jpw
scopes = view_profile manage_graders
[manage_research_exports]
client_id = sDHC8Nfp-b1XMbzZx8Wa4w
client_secret = pgD4adDd7lm-ksfG7UazUA
scopes = view_profile manage_research_exports
'''
cfg = ConfigParser.SafeConfigParser()
cfg.readfp(io.BytesIO(defaults))
cfg.read([
'/etc/coursera/courseraoauth2client.cfg',
os.path.expanduser('~/.coursera/courseraoauth2client.cfg'),
'courseraoauth2client.cfg',
])
return cfg
|
Loads configuration from the file system.
|
entailment
|
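Reading a value back shows the layering: the embedded defaults apply unless one of the three files on disk overrides them.

cfg = configuration()
print(cfg.get('oauth2', 'token_endpoint'))
# -> https://accounts.coursera.org/oauth2/v1/token (unless overridden on disk)
|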
def _load_token_cache(self):
'Reads the local fs cache for pre-authorized access tokens'
try:
logging.debug('About to read from local file cache file %s',
self.token_cache_file)
with open(self.token_cache_file, 'rb') as f:
fs_cached = cPickle.load(f)
if self._check_token_cache_type(fs_cached):
logging.debug('Loaded from file system: %s', fs_cached)
return fs_cached
else:
logging.warn('Found unexpected value in cache. %s',
fs_cached)
return None
except IOError:
logging.debug(
'Did not find file: %s on the file system.',
self.token_cache_file)
return None
except:
logging.info(
'Encountered exception loading from the file system.',
exc_info=True)
return None
|
Reads the local fs cache for pre-authorized access tokens
|
entailment
|
def _save_token_cache(self, new_cache):
'Write out to the filesystem a cache of the OAuth2 information.'
logging.debug('Looking to write to local authentication cache...')
if not self._check_token_cache_type(new_cache):
logging.error('Attempt to save a bad value: %s', new_cache)
return
try:
logging.debug('About to write to fs cache file: %s',
self.token_cache_file)
with open(self.token_cache_file, 'wb') as f:
cPickle.dump(new_cache, f, protocol=cPickle.HIGHEST_PROTOCOL)
logging.debug('Finished dumping cache_value to fs cache file.')
except:
logging.exception(
'Could not successfully cache OAuth2 secrets on the file '
'system.')
|
Write out to the filesystem a cache of the OAuth2 information.
|
entailment
|
def _check_token_cache_type(self, cache_value):
'''
Checks the cache_value for appropriate type correctness.
Pass strict=True for strict validation to ensure the latest types are
being written.
Returns True if the type is correct, False otherwise.
'''
def check_string_value(name):
return (
isinstance(cache_value[name], str) or
isinstance(cache_value[name], unicode)
)
def check_refresh_token():
if 'refresh' in cache_value:
return check_string_value('refresh')
else:
return True
return (
isinstance(cache_value, dict) and
'token' in cache_value and
'expires' in cache_value and
check_string_value('token') and
isinstance(cache_value['expires'], float) and
check_refresh_token()
)
|
Checks the cache_value for appropriate type correctness.
Pass strict=True for strict validation to ensure the latest types are
being written.
Returns True if the type is correct, False otherwise.
|
entailment
|
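Values that should pass and fail the shape check (a sketch; auth stands for an instance of the surrounding class):

good = {'token': 'abc123', 'expires': 1900000000.0, 'refresh': 'def456'}
bad = {'token': 'abc123', 'expires': 1900000000}  # expires must be a float
print(auth._check_token_cache_type(good))  # -> True
print(auth._check_token_cache_type(bad))   # -> False
|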
def _authorize_new_tokens(self):
'''
Stands up a new localhost http server and retrieves new OAuth2 access
tokens from the Coursera OAuth2 server.
'''
logging.info('About to request new OAuth2 tokens from Coursera.')
# Attempt to request new tokens from Coursera via the browser.
state_token = uuid.uuid4().hex
authorization_url = self._build_authorizaton_url(state_token)
sys.stdout.write(
'Please visit the following URL to authorize this app:\n')
sys.stdout.write('\t%s\n\n' % authorization_url)
if _platform == 'darwin':
# OS X -- leverage the 'open' command present on all modern macs
sys.stdout.write(
'Mac OS X detected; attempting to auto-open the url '
'in your default browser...\n')
try:
subprocess.check_call(['open', authorization_url])
except:
logging.exception('Could not call `open %(url)s`.',
{'url': authorization_url})
if self.local_webserver_port is not None:
# Boot up a local webserver to retrieve the response.
server_address = ('', self.local_webserver_port)
code_holder = CodeHolder()
local_server = BaseHTTPServer.HTTPServer(
server_address,
_make_handler(state_token, code_holder))
while not code_holder.has_code():
local_server.handle_request()
coursera_code = code_holder.code
else:
coursera_code = raw_input('Please enter the code from Coursera: ')
form_data = {
'code': coursera_code,
'client_id': self.client_id,
'client_secret': self.client_secret,
'redirect_uri': self._redirect_uri,
'grant_type': 'authorization_code',
}
return self._request_tokens_from_token_endpoint(form_data)
|
Stands up a new localhost http server and retrieves new OAuth2 access
tokens from the Coursera OAuth2 server.
|
entailment
|
def _exchange_refresh_tokens(self):
'Exchanges a refresh token for an access token'
if self.token_cache is not None and 'refresh' in self.token_cache:
# Attempt to use the refresh token to get a new access token.
refresh_form = {
'grant_type': 'refresh_token',
'refresh_token': self.token_cache['refresh'],
'client_id': self.client_id,
'client_secret': self.client_secret,
}
try:
tokens = self._request_tokens_from_token_endpoint(refresh_form)
tokens['refresh'] = self.token_cache['refresh']
return tokens
except OAuth2Exception:
logging.exception(
'Encountered an exception during refresh token flow.')
return None
|
Exchanges a refresh token for an access token
|
entailment
|
def foreignkey(element, exceptions):
'''
function to determine if each select field needs a create button or not
'''
label = element.field.__dict__['label']
try:
label = unicode(label)
except NameError:
pass
if (not label) or (label in exceptions):
return False
else:
return "_queryset" in element.field.__dict__
|
function to determine if each select field needs a create button or not
|
entailment
|
def deserialize_by_field(value, field):
"""
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
"""
if isinstance(field, forms.DateTimeField):
value = parse_datetime(value)
elif isinstance(field, forms.DateField):
value = parse_date(value)
elif isinstance(field, forms.TimeField):
value = parse_time(value)
return value
|
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
|
entailment
|
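An example round-trip (assumes Django is installed): JSON stores dates as strings, and the form field type drives the reconstruction.

from django import forms

value = deserialize_by_field('2020-01-31', forms.DateField())
# -> datetime.date(2020, 1, 31); fields of other types pass through unchanged
|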
def hyperprior(self):
"""Combined hyperprior for the kernel, noise kernel and (if present) mean function.
"""
hp = self.k.hyperprior * self.noise_k.hyperprior
if self.mu is not None:
hp *= self.mu.hyperprior
return hp
|
Combined hyperprior for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def fixed_params(self):
"""Combined fixed hyperparameter flags for the kernel, noise kernel and (if present) mean function.
"""
fp = CombinedBounds(self.k.fixed_params, self.noise_k.fixed_params)
if self.mu is not None:
fp = CombinedBounds(fp, self.mu.fixed_params)
return fp
|
Combined fixed hyperparameter flags for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def params(self):
"""Combined hyperparameters for the kernel, noise kernel and (if present) mean function.
"""
p = CombinedBounds(self.k.params, self.noise_k.params)
if self.mu is not None:
p = CombinedBounds(p, self.mu.params)
return p
|
Combined hyperparameters for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def param_names(self):
"""Combined names for the hyperparameters for the kernel, noise kernel and (if present) mean function.
"""
pn = CombinedBounds(self.k.param_names, self.noise_k.param_names)
if self.mu is not None:
pn = CombinedBounds(pn, self.mu.param_names)
return pn
|
Combined names for the hyperparameters for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def free_params(self):
"""Combined free hyperparameters for the kernel, noise kernel and (if present) mean function.
"""
p = CombinedBounds(self.k.free_params, self.noise_k.free_params)
if self.mu is not None:
p = CombinedBounds(p, self.mu.free_params)
return p
|
Combined free hyperparameters for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def free_params(self, value):
"""Set the free parameters. Note that this bypasses enforce_bounds.
"""
value = scipy.asarray(value, dtype=float)
self.K_up_to_date = False
self.k.free_params = value[:self.k.num_free_params]
self.noise_k.free_params = value[self.k.num_free_params:self.k.num_free_params + self.noise_k.num_free_params]
if self.mu is not None:
self.mu.free_params = value[self.k.num_free_params + self.noise_k.num_free_params:]
|
Set the free parameters. Note that this bypasses enforce_bounds.
|
entailment
|
def free_param_bounds(self):
"""Combined free hyperparameter bounds for the kernel, noise kernel and (if present) mean function.
"""
fpb = CombinedBounds(self.k.free_param_bounds, self.noise_k.free_param_bounds)
if self.mu is not None:
fpb = CombinedBounds(fpb, self.mu.free_param_bounds)
return fpb
|
Combined free hyperparameter bounds for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def free_param_names(self):
"""Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
"""
p = CombinedBounds(self.k.free_param_names, self.noise_k.free_param_names)
if self.mu is not None:
p = CombinedBounds(p, self.mu.free_param_names)
return p
|
Combined free hyperparameter names for the kernel, noise kernel and (if present) mean function.
|
entailment
|
def add_data(self, X, y, err_y=0, n=0, T=None):
"""Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error (given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`.
"""
# Verify y has only one non-trivial dimension:
y = scipy.atleast_1d(scipy.asarray(y, dtype=float))
if len(y.shape) != 1:
raise ValueError(
"Training targets y must have only one dimension with length "
"greater than one! Shape of y given is %s" % (y.shape,)
)
# Handle scalar error or verify shape of array error matches shape of y:
try:
iter(err_y)
except TypeError:
err_y = err_y * scipy.ones_like(y, dtype=float)
else:
err_y = scipy.asarray(err_y, dtype=float)
if err_y.shape != y.shape:
raise ValueError(
"When using array-like err_y, shape must match shape of y! "
"Shape of err_y given is %s, shape of y given is %s." % (err_y.shape, y.shape)
)
if (err_y < 0).any():
raise ValueError("All elements of err_y must be non-negative!")
# Handle scalar training input or convert array input into 2d.
X = scipy.atleast_2d(scipy.asarray(X, dtype=float))
# Correct single-dimension inputs:
if self.num_dim == 1 and X.shape[0] == 1:
X = X.T
if T is None and X.shape != (len(y), self.num_dim):
raise ValueError(
"Shape of training inputs must be (len(y), k.num_dim)! X given "
"has shape %s, shape of y is %s and num_dim=%d." % (X.shape, y.shape, self.num_dim)
)
# Handle scalar derivative orders or verify shape of array derivative
# orders matches shape of y:
try:
iter(n)
except TypeError:
n = n * scipy.ones_like(X, dtype=int)
else:
n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
# Correct single-dimension inputs:
if self.num_dim == 1 and n.shape[1] != 1:
n = n.T
if n.shape != X.shape:
raise ValueError(
"When using array-like n, shape must be (len(y), k.num_dim)! "
"Shape of n given is %s, shape of y given is %s and num_dim=%d."
% (n.shape, y.shape, self.num_dim)
)
if (n < 0).any():
raise ValueError("All elements of n must be non-negative integers!")
# Handle transform:
if T is None and self.T is not None:
T = scipy.eye(len(y))
if T is not None:
T = scipy.atleast_2d(scipy.asarray(T, dtype=float))
if T.ndim != 2:
raise ValueError("T must have exactly 2 dimensions!")
if T.shape[0] != len(y):
raise ValueError(
"T must have as many rows are there are elements in y!"
)
if T.shape[1] != X.shape[0]:
raise ValueError(
"There must be as many columns in T as there are rows in X!"
)
if self.T is None and self.X is not None:
self.T = scipy.eye(len(self.y))
if self.T is None:
self.T = T
else:
self.T = scipy.linalg.block_diag(self.T, T)
if self.X is None:
self.X = X
else:
self.X = scipy.vstack((self.X, X))
self.y = scipy.append(self.y, y)
self.err_y = scipy.append(self.err_y, err_y)
if self.n is None:
self.n = n
else:
self.n = scipy.vstack((self.n, n))
self.K_up_to_date = False
|
Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error (given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`.
|
entailment
|
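A usage sketch (gp stands for a GaussianProcess over one input dimension; the numbers are illustrative): scalar err_y and n broadcast over all targets.

# Two noiseless value observations, then one noisy derivative observation:
gp.add_data([[0.0], [1.0]], [1.2, 0.7])        # err_y defaults to 0
gp.add_data([[0.5]], [0.1], err_y=0.05, n=1)   # slope measured at x = 0.5
|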
def condense_duplicates(self):
"""Condense duplicate points using a transformation matrix.
This is useful if you have multiple non-transformed points at the same
location or multiple transformed points that use the same quadrature
points.
Won't change the GP if all of the rows of [X, n] are unique. Will create
a transformation matrix T if necessary. Note that the order of the
points in [X, n] will be arbitrary after this operation.
If there are any transformed quantities (i.e., `self.T` is not None), it
will also remove any quadrature points for which all of the weights are
zero (even if all of the rows of [X, n] are unique).
"""
unique, inv = unique_rows(
scipy.hstack((self.X, self.n)),
return_inverse=True
)
# Only proceed if there is anything to be gained:
if len(unique) != len(self.X):
if self.T is None:
self.T = scipy.eye(len(self.y))
new_T = scipy.zeros((len(self.y), unique.shape[0]))
for j in xrange(0, len(inv)):
new_T[:, inv[j]] += self.T[:, j]
self.T = new_T
self.n = unique[:, self.X.shape[1]:]
self.X = unique[:, :self.X.shape[1]]
# Also remove any points which don't enter into the calculation:
if self.T is not None:
# Find the columns of T which actually enter in:
# Recall that T is (n, n_Q), X is (n_Q, n_dim).
good_cols = (self.T != 0.0).any(axis=0)
self.T = self.T[:, good_cols]
self.X = self.X[good_cols, :]
self.n = self.n[good_cols, :]
|
Condense duplicate points using a transformation matrix.
This is useful if you have multiple non-transformed points at the same
location or multiple transformed points that use the same quadrature
points.
Won't change the GP if all of the rows of [X, n] are unique. Will create
a transformation matrix T if necessary. Note that the order of the
points in [X, n] will be arbitrary after this operation.
If there are any transformed quantities (i.e., `self.T` is not None), it
will also remove any quadrature points for which all of the weights are
zero (even if all of the rows of [X, n] are unique).
|
entailment
|
def remove_outliers(self, thresh=3, **predict_kwargs):
"""Remove outliers from the GP with very simplistic outlier detection.
Removes points that are more than `thresh` * `err_y` away from the GP
mean. Note that this is only very rough in that it ignores the
uncertainty in the GP mean at any given point. But you should only be
using this as a rough way of removing bad channels, anyways!
Returns the values that were removed and a boolean array indicating
where the removed points were.
Parameters
----------
thresh : float, optional
The threshold as a multiplier times `err_y`. Default is 3 (i.e.,
throw away all 3-sigma points).
**predict_kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`predict`. You can, for
instance, use this to make it use MCMC to evaluate the mean. (If you
don't use MCMC, then the current value of the hyperparameters is
used.)
Returns
-------
X_bad : array
Input values of the bad points.
y_bad : array
Bad values.
err_y_bad : array
Uncertainties on the bad values.
n_bad : array
Derivative order of the bad values.
bad_idxs : array
Array of booleans with the original shape of X with True wherever a
point was taken to be bad and subsequently removed.
T_bad : array
Transformation matrix of returned points. Only returned if
:py:attr:`T` is not None for the instance.
"""
mean = self.predict(
self.X, n=self.n, noise=False, return_std=False,
output_transform=self.T, **predict_kwargs
)
deltas = scipy.absolute(mean - self.y) / self.err_y
deltas[self.err_y == 0] = 0
bad_idxs = (deltas >= thresh)
good_idxs = ~bad_idxs
# Pull out the old values so they can be returned:
y_bad = self.y[bad_idxs]
err_y_bad = self.err_y[bad_idxs]
if self.T is not None:
T_bad = self.T[bad_idxs, :]
non_zero_cols = (T_bad != 0).any(axis=0)  # columns any bad point uses
T_bad = T_bad[:, non_zero_cols]
X_bad = self.X[non_zero_cols, :]
n_bad = self.n[non_zero_cols, :]
else:
X_bad = self.X[bad_idxs, :]
n_bad = self.n[bad_idxs, :]
# Delete the offending points:
if self.T is None:
self.X = self.X[good_idxs, :]
self.n = self.n[good_idxs, :]
else:
self.T = self.T[good_idxs, :]
non_zero_cols = (self.T != 0).any(axis=0)  # keep columns still referenced
self.T = self.T[:, non_zero_cols]
self.X = self.X[non_zero_cols, :]
self.n = self.n[non_zero_cols, :]
self.y = self.y[good_idxs]
self.err_y = self.err_y[good_idxs]
self.K_up_to_date = False
if self.T is None:
return (X_bad, y_bad, err_y_bad, n_bad, bad_idxs)
else:
return (X_bad, y_bad, err_y_bad, n_bad, bad_idxs, T_bad)
|
Remove outliers from the GP with very simplistic outlier detection.
Removes points that are more than `thresh` * `err_y` away from the GP
mean. Note that this is only very rough in that it ignores the
uncertainty in the GP mean at any given point. But you should only be
using this as a rough way of removing bad channels, anyways!
Returns the values that were removed and a boolean array indicating
where the removed points were.
Parameters
----------
thresh : float, optional
The threshold as a multiplier times `err_y`. Default is 3 (i.e.,
throw away all 3-sigma points).
**predict_kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`predict`. You can, for
instance, use this to make it use MCMC to evaluate the mean. (If you
don't use MCMC, then the current value of the hyperparameters is
used.)
Returns
-------
X_bad : array
Input values of the bad points.
y_bad : array
Bad values.
err_y_bad : array
Uncertainties on the bad values.
n_bad : array
Derivative order of the bad values.
bad_idxs : array
Array of booleans with the original shape of X with True wherever a
point was taken to be bad and subsequently removed.
T_bad : array
Transformation matrix of returned points. Only returned if
:py:attr:`T` is not None for the instance.
|
entailment
|
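A sketch of the call (gp is a hypothetical instance with no transformation matrix, so five values come back):

X_bad, y_bad, err_y_bad, n_bad, bad_idxs = gp.remove_outliers(thresh=2.5)
print('%d points removed' % bad_idxs.sum())
|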
def optimize_hyperparameters(self, method='SLSQP', opt_kwargs={},
verbose=False, random_starts=None,
num_proc=None, max_tries=1):
r"""Optimize the hyperparameters by maximizing the log-posterior.
Leaves the :py:class:`GaussianProcess` instance in the optimized state.
If :py:func:`scipy.optimize.minimize` is not available (i.e., if your
:py:mod:`scipy` version is older than 0.11.0) then :py:func:`fmin_slsqp`
is used independent of what you set for the `method` keyword.
If :py:attr:`use_hyper_deriv` is True the optimizer will attempt to use
the derivatives of the log-posterior with respect to the hyperparameters
to speed up the optimization. Note that only the squared exponential
covariance kernel supports hyperparameter derivatives at present.
Parameters
----------
method : str, optional
The method to pass to :py:func:`scipy.optimize.minimize`.
Refer to that function's docstring for valid options. Default
is 'SLSQP'. See note above about behavior with older versions of
:py:mod:`scipy`.
opt_kwargs : dict, optional
Dictionary of extra keywords to pass to
:py:func:`scipy.optimize.minimize`. Refer to that function's
docstring for valid options. Default is: {}.
verbose : bool, optional
Whether or not the output should be verbose. If True, the entire
:py:class:`Result` object from :py:func:`scipy.optimize.minimize` is
printed. If False, status information is only printed if the
`success` flag from :py:func:`minimize` is False. Default is False.
random_starts : non-negative int, optional
Number of times to randomly perturb the starting guesses
(distributed according to the hyperprior) in order to seek the
global minimum. If None, then `num_proc` random starts will be
performed. Default is None (do number of random starts equal to the
number of processors allocated). Note that for `random_starts` != 0,
the initial state of the hyperparameters is not actually used.
num_proc : non-negative int or None, optional
Number of processors to use with random starts. If 0, processing is
not done in parallel. If None, all available processors are used.
Default is None (use all available processors).
max_tries : int, optional
Number of times to run through the random start procedure if a
solution is not found. Default is to only go through the procedure
once.
"""
if opt_kwargs is None:
opt_kwargs = {}
else:
opt_kwargs = dict(opt_kwargs)
if 'method' in opt_kwargs:
method = opt_kwargs['method']
if self.verbose:
warnings.warn(
"Key 'method' is present in opt_kwargs, will override option "
"specified with method kwarg.",
RuntimeWarning
)
else:
opt_kwargs['method'] = method
if num_proc is None:
num_proc = multiprocessing.cpu_count()
param_ranges = scipy.asarray(self.free_param_bounds, dtype=float)
# Replace unbounded variables with something big:
param_ranges[scipy.where(scipy.isnan(param_ranges[:, 0])), 0] = -1e16
param_ranges[scipy.where(scipy.isnan(param_ranges[:, 1])), 1] = 1e16
param_ranges[scipy.where(scipy.isinf(param_ranges[:, 0])), 0] = -1e16
param_ranges[scipy.where(scipy.isinf(param_ranges[:, 1])), 1] = 1e16
if random_starts == 0:
num_proc = 0
param_samples = [self.free_params[:]]
else:
if random_starts is None:
random_starts = max(num_proc, 1)
# Distribute random guesses according to the hyperprior:
param_samples = self.hyperprior.random_draw(size=random_starts).T
param_samples = param_samples[:, ~self.fixed_params]
if 'bounds' not in opt_kwargs:
opt_kwargs['bounds'] = param_ranges
if self.use_hyper_deriv:
opt_kwargs['jac'] = True
trial = 0
res_min = None
while trial < max_tries and res_min is None:
if trial >= 1:
if self.verbose:
warnings.warn(
"No solutions found on trial %d, retrying random starts." % (trial - 1,),
RuntimeWarning
)
# Produce a new initial guess:
if random_starts != 0:
param_samples = self.hyperprior.random_draw(size=random_starts).T
param_samples = param_samples[:, ~self.fixed_params]
trial += 1
if num_proc > 1:
pool = InterruptiblePool(processes=num_proc)
map_fun = pool.map
else:
map_fun = map
try:
res = map_fun(
_OptimizeHyperparametersEval(self, opt_kwargs),
param_samples
)
finally:
if num_proc > 1:
pool.close()
# Filter out the failed convergences:
res = [r for r in res if r is not None]
try:
res_min = min(res, key=lambda r: r.fun)
if scipy.isnan(res_min.fun) or scipy.isinf(res_min.fun):
res_min = None
except ValueError:
res_min = None
if res_min is None:
raise ValueError(
"Optimizer failed to find a valid solution. Try changing the "
"parameter bounds, picking a new initial guess or increasing the "
"number of random starts."
)
self.update_hyperparameters(res_min.x)
if verbose:
print("Got %d completed starts, optimal result is:" % (len(res),))
print(res_min)
print("\nLL\t%.3g" % (-1 * res_min.fun))
for v, l in zip(res_min.x, self.free_param_names):
print("%s\t%.3g" % (l.translate(None, '\\'), v))
if not res_min.success:
warnings.warn(
"Optimizer %s reports failure, selected hyperparameters are "
"likely NOT optimal. Status: %d, Message: '%s'. Try adjusting "
"bounds, initial guesses or the number of random starts used."
% (
method,
res_min.status,
res_min.message
),
RuntimeWarning
)
bounds = scipy.asarray(self.free_param_bounds)
# Augment the bounds a little bit to catch things that are one step away:
if ((res_min.x <= 1.001 * bounds[:, 0]).any() or
(res_min.x >= 0.999 * bounds[:, 1]).any()):
warnings.warn(
"Optimizer appears to have hit/exceeded the bounds. Bounds are:\n"
"%s\n, solution is:\n%s. Try adjusting bounds, initial guesses "
"or the number of random starts used."
% (str(bounds), str(res_min.x),)
)
return (res_min, len(res))
|
r"""Optimize the hyperparameters by maximizing the log-posterior.
Leaves the :py:class:`GaussianProcess` instance in the optimized state.
If :py:func:`scipy.optimize.minimize` is not available (i.e., if your
:py:mod:`scipy` version is older than 0.11.0) then :py:func:`fmin_slsqp`
is used independently of what you set for the `method` keyword.
If :py:attr:`use_hyper_deriv` is True the optimizer will attempt to use
the derivatives of the log-posterior with respect to the hyperparameters
to speed up the optimization. Note that only the squared exponential
covariance kernel supports hyperparameter derivatives at present.
Parameters
----------
method : str, optional
The method to pass to :py:func:`scipy.optimize.minimize`.
Refer to that function's docstring for valid options. Default
is 'SLSQP'. See note above about behavior with older versions of
:py:mod:`scipy`.
opt_kwargs : dict, optional
Dictionary of extra keywords to pass to
:py:func:`scipy.optimize.minimize`. Refer to that function's
docstring for valid options. Default is: {}.
verbose : bool, optional
Whether or not the output should be verbose. If True, the entire
:py:class:`Result` object from :py:func:`scipy.optimize.minimize` is
printed. If False, status information is only printed if the
`success` flag from :py:func:`minimize` is False. Default is False.
random_starts : non-negative int, optional
Number of times to randomly perturb the starting guesses
(distributed according to the hyperprior) in order to seek the
global minimum. If None, then `num_proc` random starts will be
performed. Default is None (do number of random starts equal to the
number of processors allocated). Note that for `random_starts` != 0,
the initial state of the hyperparameters is not actually used.
num_proc : non-negative int or None, optional
Number of processors to use with random starts. If 0, processing is
not done in parallel. If None, all available processors are used.
Default is None (use all available processors).
max_tries : int, optional
Number of times to run through the random start procedure if a
solution is not found. Default is to only go through the procedure
once.
|
entailment
|
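The multi-start logic above (draw random starting points, minimize each, keep the best finite result) is a generic pattern. Below is a minimal, self-contained sketch of it with a toy objective standing in for the negative log-posterior; the objective, bounds, and number of starts are illustrative assumptions, not part of the library:

import numpy
import scipy.optimize

def neg_log_post(theta):
    # Toy stand-in for the negative log-posterior:
    return (theta[0] - 1.0)**2 + (theta[1] + 2.0)**2

bounds = [(-10.0, 10.0), (-10.0, 10.0)]
# The method above draws starts from the hyperprior; here they are uniform:
starts = numpy.random.uniform(-10.0, 10.0, size=(8, 2))
results = [
    scipy.optimize.minimize(neg_log_post, x0, method='SLSQP', bounds=bounds)
    for x0 in starts
]
# Keep only finite results and take the best one, as the method above does:
results = [r for r in results if numpy.isfinite(r.fun)]
res_min = min(results, key=lambda r: r.fun)
print(res_min.x)  # approximately [1.0, -2.0]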
def predict(self, Xstar, n=0, noise=False, return_std=True, return_cov=False,
full_output=False, return_samples=False, num_samples=1,
samp_kwargs={}, return_mean_func=False, use_MCMC=False,
full_MC=False, rejection_func=None, ddof=1, output_transform=None,
**kwargs):
"""Predict the mean and covariance at the inputs `Xstar`.
The order of the derivative is given by `n`. The keyword `noise` sets
whether or not noise is included in the prediction.
Parameters
----------
Xstar : array, (`M`, `D`)
`M` test input values of dimension `D`.
n : array, (`M`, `D`) or scalar, non-negative int, optional
Order of derivative to predict (0 is the base quantity). If `n` is
scalar, the value is used for all points in `Xstar`. If non-integer
values are passed, they will be silently truncated to int. Default is 0
(return base quantity).
noise : bool, optional
Whether or not noise should be included in the covariance. Default
is False (no noise in covariance).
return_std : bool, optional
Set to True to compute and return the standard deviation for the
predictions, False to skip this step. Default is True (return tuple
of (`mean`, `std`)).
return_cov : bool, optional
Set to True to compute and return the full covariance matrix for the
predictions. This overrides the `return_std` keyword. If you want
both the standard deviation and covariance matrix pre-computed, use
the `full_output` keyword.
full_output : bool, optional
Set to True to return the full outputs in a dictionary with keys:
================= ===========================================================================
mean mean of GP at requested points
std standard deviation of GP at requested points
cov covariance matrix for values of GP at requested points
samp random samples of GP at requested points (only if `return_samples` is True)
mean_func mean function of GP (only if `return_mean_func` is True)
cov_func covariance of mean function of GP (zero if not using MCMC)
std_func standard deviation of mean function of GP (zero if not using MCMC)
mean_without_func mean of GP minus mean function of GP
cov_without_func covariance matrix of just the GP portion of the fit
std_without_func standard deviation of just the GP portion of the fit
================= ===========================================================================
return_samples : bool, optional
Set to True to compute and return samples of the GP in addition to
computing the mean. Only done if `full_output` is True. Default is
False.
num_samples : int, optional
Number of samples to compute. If using MCMC this is the number of
samples per MCMC sample, if using present values of hyperparameters
this is the number of samples actually returned. Default is 1.
samp_kwargs : dict, optional
Additional keywords to pass to :py:meth:`draw_sample` if
`return_samples` is True. Default is {}.
return_mean_func : bool, optional
Set to True to return the evaluation of the mean function in
addition to computing the mean of the process itself. Only done if
`full_output` is True and `self.mu` is not None. Default is False.
use_MCMC : bool, optional
Set to True to use :py:meth:`predict_MCMC` to evaluate the
prediction marginalized over the hyperparameters.
full_MC : bool, optional
Set to True to compute the mean and covariance matrix using Monte
Carlo sampling of the posterior. The samples will also be returned
if full_output is True. The sample mean and covariance will be
evaluated after filtering through `rejection_func`, so conditional
means and covariances can be computed. Default is False (do not use
full sampling).
rejection_func : callable, optional
Any samples where this function evaluates False will be rejected,
where it evaluates True they will be kept. Default is None (no
rejection). Only has an effect if `full_MC` is True.
ddof : int, optional
The degree of freedom correction to use when computing the covariance
matrix when `full_MC` is True. Default is 1 (unbiased estimator).
output_transform : array, (`L`, `M`), optional
Matrix to use to transform the output vector of length `M` to one of
length `L`. This can, for instance, be used to compute integrals.
**kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`predict_MCMC` if
`use_MCMC` is True.
Returns
-------
mean : array, (`M`,)
Predicted GP mean. Only returned if `full_output` is False.
std : array, (`M`,)
Predicted standard deviation, only returned if `return_std` is True, `return_cov` is False and `full_output` is False.
cov : array, (`M`, `M`)
Predicted covariance matrix, only returned if `return_cov` is True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples and the mean function. Only returned if `full_output` is True.
Raises
------
ValueError
If `n` is not consistent with the shape of `Xstar` or is not entirely
composed of non-negative integers.
"""
if use_MCMC:
res = self.predict_MCMC(
Xstar,
n=n,
noise=noise,
return_std=return_std or full_output,
return_cov=return_cov or full_output,
return_samples=full_output and (return_samples or rejection_func),
return_mean_func=full_output and return_mean_func,
num_samples=num_samples,
samp_kwargs=samp_kwargs,
full_MC=full_MC,
rejection_func=rejection_func,
ddof=ddof,
output_transform=output_transform,
**kwargs
)
if full_output:
return res
elif return_cov:
return (res['mean'], res['cov'])
elif return_std:
return (res['mean'], res['std'])
else:
return res['mean']
else:
# Process Xstar:
Xstar = scipy.atleast_2d(scipy.asarray(Xstar, dtype=float))
# Handle 1d x case where array is passed in:
if self.num_dim == 1 and Xstar.shape[0] == 1:
Xstar = Xstar.T
if Xstar.shape[1] != self.num_dim:
raise ValueError(
"Second dimension of Xstar must be equal to self.num_dim! "
"Shape of Xstar given is %s, num_dim is %d."
% (Xstar.shape, self.num_dim)
)
# Process T:
if output_transform is not None:
output_transform = scipy.atleast_2d(scipy.asarray(output_transform, dtype=float))
if output_transform.ndim != 2:
raise ValueError(
"output_transform must have exactly 2 dimensions! Shape "
"of output_transform given is %s."
% (output_transform.shape,)
)
if output_transform.shape[1] != Xstar.shape[0]:
raise ValueError(
"output_transform must have the same number of columns "
"the number of rows in Xstar! Shape of output_transform "
"given is %s, shape of Xstar is %s."
% (output_transform.shape, Xstar.shape,)
)
# Process n:
try:
iter(n)
except TypeError:
n = n * scipy.ones(Xstar.shape, dtype=int)
else:
n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
if self.num_dim == 1 and n.shape[0] == 1:
n = n.T
if n.shape != Xstar.shape:
raise ValueError(
"When using array-like n, shape must match shape of Xstar! "
"Shape of n given is %s, shape of Xstar given is %s."
% (n.shape, Xstar.shape)
)
if (n < 0).any():
raise ValueError("All elements of n must be non-negative integers!")
self.compute_K_L_alpha_ll()
Kstar = self.compute_Kij(self.X, Xstar, self.n, n)
if noise:
Kstar = Kstar + self.compute_Kij(self.X, Xstar, self.n, n, noise=True)
if self.T is not None:
Kstar = self.T.dot(Kstar)
mean = Kstar.T.dot(self.alpha)
if self.mu is not None:
mean_func = scipy.atleast_2d(self.mu(Xstar, n)).T
mean += mean_func
if output_transform is not None:
mean = output_transform.dot(mean)
if return_mean_func and self.mu is not None:
mean_func = output_transform.dot(mean_func)
mean = mean.ravel()
if return_mean_func and self.mu is not None:
mean_func = mean_func.ravel()
if return_std or return_cov or full_output or full_MC:
v = scipy.linalg.solve_triangular(self.L, Kstar, lower=True)
Kstarstar = self.compute_Kij(Xstar, None, n, None)
if noise:
Kstarstar = Kstarstar + self.compute_Kij(Xstar, None, n, None, noise=True)
covariance = Kstarstar - v.T.dot(v)
if output_transform is not None:
covariance = output_transform.dot(covariance.dot(output_transform.T))
if return_samples or full_MC:
samps = self.draw_sample(
Xstar, n=n, num_samp=num_samples, mean=mean,
cov=covariance, **samp_kwargs
)
if rejection_func:
good_samps = []
for samp in samps.T:
if rejection_func(samp):
good_samps.append(samp)
if len(good_samps) == 0:
raise ValueError("Did not get any good samples!")
samps = scipy.asarray(good_samps, dtype=float).T
if full_MC:
mean = scipy.mean(samps, axis=1)
covariance = scipy.cov(samps, rowvar=1, ddof=ddof)
std = scipy.sqrt(scipy.diagonal(covariance))
if full_output:
out = {
'mean': mean,
'std': std,
'cov': covariance
}
if return_samples or full_MC:
out['samp'] = samps
if return_mean_func and self.mu is not None:
out['mean_func'] = mean_func
out['cov_func'] = scipy.zeros(
(len(mean_func), len(mean_func)),
dtype=float
)
out['std_func'] = scipy.zeros_like(mean_func)
out['mean_without_func'] = mean - mean_func
out['cov_without_func'] = covariance
out['std_without_func'] = std
return out
else:
if return_cov:
return (mean, covariance)
elif return_std:
return (mean, std)
else:
return mean
else:
return mean
|
Predict the mean and covariance at the inputs `Xstar`.
The order of the derivative is given by `n`. The keyword `noise` sets
whether or not noise is included in the prediction.
Parameters
----------
Xstar : array, (`M`, `D`)
`M` test input values of dimension `D`.
n : array, (`M`, `D`) or scalar, non-negative int, optional
Order of derivative to predict (0 is the base quantity). If `n` is
scalar, the value is used for all points in `Xstar`. If non-integer
values are passed, they will be silently truncated to int. Default is 0
(return base quantity).
noise : bool, optional
Whether or not noise should be included in the covariance. Default
is False (no noise in covariance).
return_std : bool, optional
Set to True to compute and return the standard deviation for the
predictions, False to skip this step. Default is True (return tuple
of (`mean`, `std`)).
return_cov : bool, optional
Set to True to compute and return the full covariance matrix for the
predictions. This overrides the `return_std` keyword. If you want
both the standard deviation and covariance matrix pre-computed, use
the `full_output` keyword.
full_output : bool, optional
Set to True to return the full outputs in a dictionary with keys:
================= ===========================================================================
mean mean of GP at requested points
std standard deviation of GP at requested points
cov covariance matrix for values of GP at requested points
samp random samples of GP at requested points (only if `return_samples` is True)
mean_func mean function of GP (only if `return_mean_func` is True)
cov_func covariance of mean function of GP (zero if not using MCMC)
std_func standard deviation of mean function of GP (zero if not using MCMC)
mean_without_func mean of GP minus mean function of GP
cov_without_func covariance matrix of just the GP portion of the fit
std_without_func standard deviation of just the GP portion of the fit
================= ===========================================================================
return_samples : bool, optional
Set to True to compute and return samples of the GP in addition to
computing the mean. Only done if `full_output` is True. Default is
False.
num_samples : int, optional
Number of samples to compute. If using MCMC this is the number of
samples per MCMC sample, if using present values of hyperparameters
this is the number of samples actually returned. Default is 1.
samp_kwargs : dict, optional
Additional keywords to pass to :py:meth:`draw_sample` if
`return_samples` is True. Default is {}.
return_mean_func : bool, optional
Set to True to return the evaluation of the mean function in
addition to computing the mean of the process itself. Only done if
`full_output` is True and `self.mu` is not None. Default is False.
use_MCMC : bool, optional
Set to True to use :py:meth:`predict_MCMC` to evaluate the
prediction marginalized over the hyperparameters.
full_MC : bool, optional
Set to True to compute the mean and covariance matrix using Monte
Carlo sampling of the posterior. The samples will also be returned
if full_output is True. The sample mean and covariance will be
evaluated after filtering through `rejection_func`, so conditional
means and covariances can be computed. Default is False (do not use
full sampling).
rejection_func : callable, optional
Any samples where this function evaluates False will be rejected,
where it evaluates True they will be kept. Default is None (no
rejection). Only has an effect if `full_MC` is True.
ddof : int, optional
The degree of freedom correction to use when computing the covariance
matrix when `full_MC` is True. Default is 1 (unbiased estimator).
output_transform : array, (`L`, `M`), optional
Matrix to use to transform the output vector of length `M` to one of
length `L`. This can, for instance, be used to compute integrals.
**kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`predict_MCMC` if
`use_MCMC` is True.
Returns
-------
mean : array, (`M`,)
Predicted GP mean. Only returned if `full_output` is False.
std : array, (`M`,)
Predicted standard deviation, only returned if `return_std` is True, `return_cov` is False and `full_output` is False.
cov : array, (`M`, `M`)
Predicted covariance matrix, only returned if `return_cov` is True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples and the mean function. Only returned if `full_output` is True.
Raises
------
ValueError
If `n` is not consistent with the shape of `Xstar` or is not entirely
composed of non-negative integers.
|
entailment
|
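A short usage sketch for the call signatures documented above. The fitted GaussianProcess instance `gp` is assumed to exist (its construction and training data are omitted); the keyword combinations follow the docstring directly:

import numpy

Xstar = numpy.linspace(0.0, 1.0, 25)[:, None]  # 25 univariate test points
# Default return is the tuple (mean, std):
mean, std = gp.predict(Xstar)
# Mean and full covariance matrix instead:
mean, cov = gp.predict(Xstar, return_cov=True)
# First derivative of the fit, with everything in one dictionary:
out = gp.predict(Xstar, n=1, full_output=True)
mean_deriv, std_deriv = out['mean'], out['std']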
def plot(self, X=None, n=0, ax=None, envelopes=[1, 3], base_alpha=0.375,
return_prediction=False, return_std=True, full_output=False,
plot_kwargs={}, **kwargs):
"""Plots the Gaussian process using the current hyperparameters. Only for num_dim <= 2.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`), optional
The values to evaluate the Gaussian process at. If None, then 100
points between the minimum and maximum of the data's X are used for
a univariate Gaussian process and a 50x50 grid is used for a
bivariate Gaussian process. Default is None (use 100 points between
min and max).
n : int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
ax : axis instance, optional
Axis to plot the result on. If no axis is passed, one is created.
If the string 'gca' is passed, the current axis (from plt.gca())
is used. If num_dim = 2, the axis must be 3d.
envelopes: list of float, optional
+/-n*sigma envelopes to plot. Default is [1, 3].
base_alpha : float, optional
Alpha value to use for +/-1*sigma envelope. All other envelopes `env`
are drawn with `base_alpha`/`env`. Default is 0.375.
return_prediction : bool, optional
If True, the predicted values are also returned. Default is False.
return_std : bool, optional
If True, the standard deviation is computed and returned along with
the mean when `return_prediction` is True. Default is True.
full_output : bool, optional
Set to True to return the full outputs in a dictionary with keys:
==== ==========================================================================
mean mean of GP at requested points
std standard deviation of GP at requested points
cov covariance matrix for values of GP at requested points
samp random samples of GP at requested points (only if `return_samples` is True)
==== ==========================================================================
plot_kwargs : dict, optional
The entries in this dictionary are passed as kwargs to the plotting
command used to plot the mean. Use this to, for instance, change the
color, line width and line style.
**kwargs : extra arguments for predict, optional
Extra arguments that are passed to :py:meth:`predict`.
Returns
-------
ax : axis instance
The axis instance used.
mean : :py:class:`Array`, (`M`,)
Predicted GP mean. Only returned if `return_prediction` is True and `full_output` is False.
std : :py:class:`Array`, (`M`,)
Predicted standard deviation, only returned if `return_prediction` and `return_std` are True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples. Only returned if `return_prediction` and `full_output` are True.
"""
if self.num_dim > 2:
raise ValueError("Plotting is not supported for num_dim > 2!")
if self.num_dim == 1:
if X is None:
X = scipy.linspace(self.X.min(), self.X.max(), 100)
elif self.num_dim == 2:
if X is None:
x1 = scipy.linspace(self.X[:, 0].min(), self.X[:, 0].max(), 50)
x2 = scipy.linspace(self.X[:, 1].min(), self.X[:, 1].max(), 50)
X1, X2 = scipy.meshgrid(x1, x2)
X1 = X1.flatten()
X2 = X2.flatten()
X = scipy.hstack((scipy.atleast_2d(X1).T, scipy.atleast_2d(X2).T))
else:
X1 = scipy.asarray(X[:, 0]).flatten()
X2 = scipy.asarray(X[:, 1]).flatten()
if envelopes or (return_prediction and (return_std or full_output)):
out = self.predict(X, n=n, full_output=True, **kwargs)
mean = out['mean']
std = out['std']
else:
mean = self.predict(X, n=n, return_std=False, **kwargs)
std = None
if self.num_dim == 1:
univariate_envelope_plot(
X,
mean,
std,
ax=ax,
base_alpha=base_alpha,
envelopes=envelopes,
**plot_kwargs
)
elif self.num_dim == 2:
if ax is None:
f = plt.figure()
ax = f.add_subplot(111, projection='3d')
elif ax == 'gca':
ax = plt.gca()
if 'linewidths' not in plot_kwargs:
    plot_kwargs['linewidths'] = 0
s = ax.plot_trisurf(X1, X2, mean, **plot_kwargs)
for i in envelopes:
    # Drop any user-specified alpha so it does not clash with the
    # explicit alpha passed for each envelope:
    plot_kwargs.pop('alpha', base_alpha)
    ax.plot_trisurf(X1, X2, mean - std, alpha=base_alpha / i, **plot_kwargs)
    ax.plot_trisurf(X1, X2, mean + std, alpha=base_alpha / i, **plot_kwargs)
if return_prediction:
if full_output:
return (ax, out)
elif return_std:
return (ax, out['mean'], out['std'])
else:
# out is not computed when envelopes is empty, so return mean directly:
return (ax, mean)
else:
return ax
|
Plots the Gaussian process using the current hyperparameters. Only for num_dim <= 2.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`), optional
The values to evaluate the Gaussian process at. If None, then 100
points between the minimum and maximum of the data's X are used for
a univariate Gaussian process and a 50x50 grid is used for a
bivariate Gaussian process. Default is None (use 100 points between
min and max).
n : int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
ax : axis instance, optional
Axis to plot the result on. If no axis is passed, one is created.
If the string 'gca' is passed, the current axis (from plt.gca())
is used. If num_dim = 2, the axis must be 3d.
envelopes: list of float, optional
+/-n*sigma envelopes to plot. Default is [1, 3].
base_alpha : float, optional
Alpha value to use for +/-1*sigma envelope. All other envelopes `env`
are drawn with `base_alpha`/`env`. Default is 0.375.
return_prediction : bool, optional
If True, the predicted values are also returned. Default is False.
return_std : bool, optional
If True, the standard deviation is computed and returned along with
the mean when `return_prediction` is True. Default is True.
full_output : bool, optional
Set to True to return the full outputs in a dictionary with keys:
==== ==========================================================================
mean mean of GP at requested points
std standard deviation of GP at requested points
cov covariance matrix for values of GP at requested points
samp random samples of GP at requested points (only if `return_samples` is True)
==== ==========================================================================
plot_kwargs : dict, optional
The entries in this dictionary are passed as kwargs to the plotting
command used to plot the mean. Use this to, for instance, change the
color, line width and line style.
**kwargs : extra arguments for predict, optional
Extra arguments that are passed to :py:meth:`predict`.
Returns
-------
ax : axis instance
The axis instance used.
mean : :py:class:`Array`, (`M`,)
Predicted GP mean. Only returned if `return_prediction` is True and `full_output` is False.
std : :py:class:`Array`, (`M`,)
Predicted standard deviation, only returned if `return_prediction` and `return_std` are True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples. Only returned if `return_prediction` and `full_output` are True.
|
entailment
|
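A usage sketch for the plotting interface, again assuming a fitted univariate `gp` (construction omitted); the envelope and styling choices are illustrative:

# Mean with +/-1 and +/-2 sigma envelopes, styling passed to the mean line:
ax = gp.plot(envelopes=[1, 2], plot_kwargs={'color': 'b', 'lw': 2})
# First derivative, also returning the prediction itself:
ax, mean, std = gp.plot(n=1, return_prediction=True)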
def draw_sample(self, Xstar, n=0, num_samp=1, rand_vars=None,
rand_type='standard normal', diag_factor=1e3,
method='cholesky', num_eig=None, mean=None, cov=None,
modify_sign=None, **kwargs):
"""Draw a sample evaluated at the given points `Xstar`.
Note that this function draws samples from the GP given the current
values for the hyperparameters (which may be in a nonsense state if you
just created the instance or called a method that performs MCMC sampling).
If you want to draw random samples from MCMC output, use the
`return_samples` and `full_output` keywords to :py:meth:`predict`.
Parameters
----------
Xstar : array, (`M`, `D`)
`M` test input values of dimension `D`.
n : array, (`M`, `D`) or scalar, non-negative int, optional
Derivative order to evaluate at. Default is 0 (evaluate value).
noise : bool, optional
Whether or not to include the noise components of the kernel in the
sample. Default is False (no noise in samples).
num_samp : Positive int, optional
Number of samples to draw. Default is 1. Cannot be used in
conjunction with `rand_vars`: If you pass both `num_samp` and
`rand_vars`, `num_samp` will be silently ignored.
rand_vars : array, (`M`, `P`), optional
Vector of random variables :math:`u` to use in constructing the
sample :math:`y_* = f_* + Lu`, where :math:`K=LL^T`. If None,
values will be produced using
:py:func:`numpy.random.multivariate_normal`. This allows you to use
pseudo/quasi random numbers generated by an external routine. Note
that, when `method` is 'eig', the eigenvalues are in *ascending*
order.
Default is None (use :py:func:`multivariate_normal` directly).
rand_type : {'standard normal', 'uniform'}, optional
Type of distribution the inputs are given with.
* 'standard normal': Standard (`mu` = 0, `sigma` = 1) normal
distribution (this is the default)
* 'uniform': Uniform distribution on [0, 1). In this case
the required Gaussian variables are produced with inversion.
diag_factor : float, optional
Number (times machine epsilon) added to the diagonal of the
covariance matrix prior to computing its Cholesky decomposition.
This is necessary as sometimes the decomposition will fail because,
to machine precision, the matrix appears to not be positive definite.
If you are getting errors from :py:func:`scipy.linalg.cholesky`, try
increasing this by an order of magnitude at a time. This parameter only
has an effect when `rand_vars` is used or when `method` is 'eig'.
Default value is 1e3.
method : {'cholesky', 'eig'}, optional
Method to use for constructing the matrix square root. Default is
'cholesky' (use lower-triangular Cholesky decomposition).
* 'cholesky': Perform Cholesky decomposition on the covariance
matrix: :math:`K=LL^T`, use :math:`L` as the matrix square
root.
* 'eig': Perform an eigenvalue decomposition on the covariance
matrix: :math:`K=Q \\Lambda Q^{-1}`, use :math:`Q\\Lambda^{1/2}`
as the matrix square root.
num_eig : int or None, optional
Number of eigenvalues to compute. Can range from 1 to `M` (the
number of test points). If it is None, then all eigenvalues are
computed. Default is None (compute all eigenvalues). This keyword
only has an effect if `method` is 'eig'.
mean : array, (`M`,), optional
If you have pre-computed the mean and covariance matrix, then you
can simply pass them in with the `mean` and `cov` keywords to save
on having to call :py:meth:`predict`.
cov : array, (`M`, `M`), optional
If you have pre-computed the mean and covariance matrix, then you
can simply pass them in with the `mean` and `cov` keywords to save
on having to call :py:meth:`predict`.
modify_sign : {None, 'left value', 'right value', 'left slope', 'right slope', 'left concavity', 'right concavity'}, optional
If None (the default), the eigenvectors as returned by
:py:func:`scipy.linalg.eigh` are used without modification. To
modify the sign of the eigenvectors (necessary for some advanced use
cases), set this kwarg to one of the following:
* 'left value': forces the first value of each eigenvector to be
positive.
* 'right value': forces the last value of each eigenvector to be
positive.
* 'left slope': forces the slope to be positive at the start of
each eigenvector.
* 'right slope': forces the slope to be positive at the end of
each eigenvector.
* 'left concavity': forces the second derivative to be positive
at the start of each eigenvector.
* 'right concavity': forces the second derivative to be positive
at the end of each eigenvector.
**kwargs : optional kwargs
All extra keyword arguments are passed to :py:meth:`predict` when
evaluating the mean and covariance matrix of the GP.
Returns
-------
samples : :py:class:`Array` (`M`, `P`) or (`M`, `num_samp`)
Samples evaluated at the `M` points.
Raises
------
ValueError
If rand_type or method is invalid.
"""
# All of the input processing for Xstar and n will be done in here:
if mean is None or cov is None:
out = self.predict(Xstar, n=n, full_output=True, **kwargs)
mean = out['mean']
cov = out['cov']
if rand_vars is None and method != 'eig':
try:
return numpy.random.multivariate_normal(mean, cov, num_samp).T
except numpy.linalg.LinAlgError as e:
if self.verbose:
warnings.warn(
"Failure when drawing from MVN! Falling back on eig. "
"Exception was:\n%s"
% (e,),
RuntimeWarning
)
method = 'eig'
if num_eig is None or num_eig > len(mean):
num_eig = len(mean)
elif num_eig < 1:
num_eig = 1
if rand_vars is None:
rand_vars = numpy.random.standard_normal((num_eig, num_samp))
valid_types = ('standard normal', 'uniform')
if rand_type not in valid_types:
raise ValueError(
"rand_type %s not recognized! Valid options are: %s."
% (rand_type, valid_types,)
)
if rand_type == 'uniform':
rand_vars = scipy.stats.norm.ppf(rand_vars)
if method == 'cholesky':
L = scipy.linalg.cholesky(
cov + diag_factor * sys.float_info.epsilon * scipy.eye(cov.shape[0]),
lower=True,
check_finite=False
)
elif method == 'eig':
# TODO: Add support for specifying cutoff eigenvalue!
# Not technically lower triangular, but we'll keep the name L:
eig, Q = scipy.linalg.eigh(
cov + diag_factor * sys.float_info.epsilon * scipy.eye(cov.shape[0]),
eigvals=(len(mean) - 1 - (num_eig - 1), len(mean) - 1)
)
if modify_sign is not None:
if modify_sign == 'left value':
modify_mask = (Q[0, :] < 0.0)
elif modify_sign == 'right value':
modify_mask = (Q[-1, :] < 0.0)
elif modify_sign == 'left slope':
modify_mask = ((Q[1, :] - Q[0, :]) < 0.0)
elif modify_sign == 'right slope':
modify_mask = ((Q[-1, :] - Q[-2, :]) < 0.0)
elif modify_sign == 'left concavity':
modify_mask = ((Q[2, :] - 2 * Q[1, :] + Q[0, :]) < 0.0)
elif modify_sign == 'right concavity':
modify_mask = ((Q[-1, :] - 2 * Q[-2, :] + Q[-3, :]) < 0.0)
else:
raise ValueError(
"modify_sign %s not recognized!" % (modify_sign,)
)
Q[:, modify_mask] *= -1.0
Lam_1_2 = scipy.diag(scipy.sqrt(eig))
L = Q.dot(Lam_1_2)
else:
raise ValueError("method %s not recognized!" % (method,))
return scipy.atleast_2d(mean).T + L.dot(rand_vars[:num_eig, :])
|
Draw a sample evaluated at the given points `Xstar`.
Note that this function draws samples from the GP given the current
values for the hyperparameters (which may be in a nonsense state if you
just created the instance or called a method that performs MCMC sampling).
If you want to draw random samples from MCMC output, use the
`return_samples` and `full_output` keywords to :py:meth:`predict`.
Parameters
----------
Xstar : array, (`M`, `D`)
`M` test input values of dimension `D`.
n : array, (`M`, `D`) or scalar, non-negative int, optional
Derivative order to evaluate at. Default is 0 (evaluate value).
noise : bool, optional
Whether or not to include the noise components of the kernel in the
sample. Default is False (no noise in samples).
num_samp : Positive int, optional
Number of samples to draw. Default is 1. Cannot be used in
conjunction with `rand_vars`: If you pass both `num_samp` and
`rand_vars`, `num_samp` will be silently ignored.
rand_vars : array, (`M`, `P`), optional
Vector of random variables :math:`u` to use in constructing the
sample :math:`y_* = f_* + Lu`, where :math:`K=LL^T`. If None,
values will be produced using
:py:func:`numpy.random.multivariate_normal`. This allows you to use
pseudo/quasi random numbers generated by an external routine. Note
that, when `method` is 'eig', the eigenvalues are in *ascending*
order.
Default is None (use :py:func:`multivariate_normal` directly).
rand_type : {'standard normal', 'uniform'}, optional
Type of distribution the inputs are given with.
* 'standard normal': Standard (`mu` = 0, `sigma` = 1) normal
distribution (this is the default)
* 'uniform': Uniform distribution on [0, 1). In this case
the required Gaussian variables are produced with inversion.
diag_factor : float, optional
Number (times machine epsilon) added to the diagonal of the
covariance matrix prior to computing its Cholesky decomposition.
This is necessary as sometimes the decomposition will fail because,
to machine precision, the matrix appears to not be positive definite.
If you are getting errors from :py:func:`scipy.linalg.cholesky`, try
increasing this by an order of magnitude at a time. This parameter only
has an effect when `rand_vars` is used or when `method` is 'eig'.
Default value is 1e3.
method : {'cholesky', 'eig'}, optional
Method to use for constructing the matrix square root. Default is
'cholesky' (use lower-triangular Cholesky decomposition).
* 'cholesky': Perform Cholesky decomposition on the covariance
matrix: :math:`K=LL^T`, use :math:`L` as the matrix square
root.
* 'eig': Perform an eigenvalue decomposition on the covariance
matrix: :math:`K=Q \\Lambda Q^{-1}`, use :math:`Q\\Lambda^{1/2}`
as the matrix square root.
num_eig : int or None, optional
Number of eigenvalues to compute. Can range from 1 to `M` (the
number of test points). If it is None, then all eigenvalues are
computed. Default is None (compute all eigenvalues). This keyword
only has an effect if `method` is 'eig'.
mean : array, (`M`,), optional
If you have pre-computed the mean and covariance matrix, then you
can simply pass them in with the `mean` and `cov` keywords to save
on having to call :py:meth:`predict`.
cov : array, (`M`, `M`), optional
If you have pre-computed the mean and covariance matrix, then you
can simply pass them in with the `mean` and `cov` keywords to save
on having to call :py:meth:`predict`.
modify_sign : {None, 'left value', 'right value', 'left slope', 'right slope', 'left concavity', 'right concavity'}, optional
If None (the default), the eigenvectors as returned by
:py:func:`scipy.linalg.eigh` are used without modification. To
modify the sign of the eigenvectors (necessary for some advanced use
cases), set this kwarg to one of the following:
* 'left value': forces the first value of each eigenvector to be
positive.
* 'right value': forces the last value of each eigenvector to be
positive.
* 'left slope': forces the slope to be positive at the start of
each eigenvector.
* 'right slope': forces the slope to be positive at the end of
each eigenvector.
* 'left concavity': forces the second derivative to be positive
at the start of each eigenvector.
* 'right concavity': forces the second derivative to be positive
at the end of each eigenvector.
**kwargs : optional kwargs
All extra keyword arguments are passed to :py:meth:`predict` when
evaluating the mean and covariance matrix of the GP.
Returns
-------
samples : :py:class:`Array` (`M`, `P`) or (`M`, `num_samp`)
Samples evaluated at the `M` points.
Raises
------
ValueError
If rand_type or method is invalid.
|
entailment
|
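The 'cholesky' path above is the standard construction: factor the jittered covariance as K = LL^T and map standard-normal draws u to mean + Lu. A self-contained numpy/scipy illustration with a toy covariance (the kernel, length scale and noise floor are assumptions for demonstration):

import sys
import numpy
import scipy.linalg

M, num_samp = 30, 4
x = numpy.linspace(0.0, 1.0, M)
# Toy squared-exponential covariance plus a small noise floor so that the
# matrix is comfortably positive definite:
cov = numpy.exp(-(x[:, None] - x[None, :])**2 / (2.0 * 0.1**2))
cov += 1e-8 * numpy.eye(M)
mean = numpy.zeros(M)
diag_factor = 1e3
# Jitter the diagonal, as in the method above, so the factorization succeeds:
L = scipy.linalg.cholesky(
    cov + diag_factor * sys.float_info.epsilon * numpy.eye(M), lower=True
)
u = numpy.random.standard_normal((M, num_samp))
samples = mean[:, None] + L.dot(u)  # shape (M, num_samp)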
def update_hyperparameters(self, new_params, hyper_deriv_handling='default', exit_on_bounds=True, inf_on_error=True):
r"""Update the kernel's hyperparameters to the new parameters.
This will call :py:meth:`compute_K_L_alpha_ll` to update the state
accordingly.
Note that if this method crashes and the `hyper_deriv_handling` keyword
was used, it may leave :py:attr:`use_hyper_deriv` in the wrong state.
Parameters
----------
new_params : :py:class:`Array` or other Array-like, length dictated by kernel
New parameters to use.
hyper_deriv_handling : {'default', 'value', 'deriv'}, optional
Determines what to compute and return. If 'default' and
:py:attr:`use_hyper_deriv` is True then the negative log-posterior
and the negative gradient of the log-posterior with respect to the
hyperparameters is returned. If 'default' and
:py:attr:`use_hyper_deriv` is False or 'value' then only the negative
log-posterior is returned. If 'deriv' then only the negative gradient
of the log-posterior with respect to the hyperparameters is returned.
exit_on_bounds : bool, optional
If True, the method will automatically exit if the hyperparameters
are impossible given the hyperprior, without trying to update the
internal state. This is useful during MCMC sampling and optimization.
Default is True (don't perform update for impossible hyperparameters).
inf_on_error : bool, optional
If True, the method will return `scipy.inf` if the hyperparameters
produce a linear algebra error upon trying to update the Gaussian
process. Default is True (catch errors and return infinity).
Returns
-------
-1*ll : float
The updated negative log posterior.
-1*ll_deriv : array of float, (`num_params`,)
The negative gradient of the log posterior. Only returned if
:py:attr:`use_hyper_deriv` is True or `hyper_deriv_handling` is set
to 'deriv'.
"""
use_hyper_deriv = self.use_hyper_deriv
if hyper_deriv_handling == 'value':
self.use_hyper_deriv = False
elif hyper_deriv_handling == 'deriv':
self.use_hyper_deriv = True
self.k.set_hyperparams(new_params[:len(self.k.free_params)])
self.noise_k.set_hyperparams(
new_params[len(self.k.free_params):len(self.k.free_params) + len(self.noise_k.free_params)]
)
if self.mu is not None:
self.mu.set_hyperparams(
new_params[len(self.k.free_params) + len(self.noise_k.free_params):]
)
self.K_up_to_date = False
try:
if exit_on_bounds:
if scipy.isinf(self.hyperprior(self.params)):
raise GPImpossibleParamsError("Impossible values for params!")
self.compute_K_L_alpha_ll()
except Exception as e:
if inf_on_error:
if not isinstance(e, GPImpossibleParamsError) and self.verbose:
warnings.warn(
"Unhandled exception when updating GP! Exception was:\n%s\n"
"State of params is: %s"
% (traceback.format_exc(), str(self.free_params[:]))
)
self.use_hyper_deriv = use_hyper_deriv
if use_hyper_deriv and hyper_deriv_handling == 'default':
return (scipy.inf, scipy.zeros(len(self.free_params)))
elif hyper_deriv_handling == 'deriv':
return scipy.zeros(len(self.free_params))
else:
return scipy.inf
else:
self.use_hyper_deriv = use_hyper_deriv
raise e
self.use_hyper_deriv = use_hyper_deriv
if use_hyper_deriv and hyper_deriv_handling == 'default':
return (-1.0 * self.ll, -1.0 * self.ll_deriv)
elif hyper_deriv_handling == 'deriv':
return -1.0 * self.ll_deriv
else:
return -1.0 * self.ll
|
r"""Update the kernel's hyperparameters to the new parameters.
This will call :py:meth:`compute_K_L_alpha_ll` to update the state
accordingly.
Note that if this method crashes and the `hyper_deriv_handling` keyword
was used, it may leave :py:attr:`use_hyper_deriv` in the wrong state.
Parameters
----------
new_params : :py:class:`Array` or other Array-like, length dictated by kernel
New parameters to use.
hyper_deriv_handling : {'default', 'value', 'deriv'}, optional
Determines what to compute and return. If 'default' and
:py:attr:`use_hyper_deriv` is True then the negative log-posterior
and the negative gradient of the log-posterior with respect to the
hyperparameters is returned. If 'default' and
:py:attr:`use_hyper_deriv` is False or 'value' then only the negative
log-posterior is returned. If 'deriv' then only the negative gradient
of the log-posterior with respect to the hyperparameters is returned.
exit_on_bounds : bool, optional
If True, the method will automatically exit if the hyperparameters
are impossible given the hyperprior, without trying to update the
internal state. This is useful during MCMC sampling and optimization.
Default is True (don't perform update for impossible hyperparameters).
inf_on_error : bool, optional
If True, the method will return `scipy.inf` if the hyperparameters
produce a linear algebra error upon trying to update the Gaussian
process. Default is True (catch errors and return infinity).
Returns
-------
-1*ll : float
The updated negative log posterior.
-1*ll_deriv : array of float, (`num_params`,)
The negative gradient of the log posterior. Only returned if
:py:attr:`use_hyper_deriv` is True or `hyper_deriv_handling` is set
to 'deriv'.
|
entailment
|
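Because this method returns the negative log-posterior for a given vector of free parameters, it can serve directly as an optimizer objective. A hedged sketch, assuming a fitted `gp` whose `use_hyper_deriv` is False (so only the scalar value is returned) and whose free-parameter bounds are finite:

import scipy.optimize

res = scipy.optimize.minimize(
    gp.update_hyperparameters,  # returns -1*ll for a free-parameter vector
    gp.free_params[:],          # start from the current free parameters
    method='SLSQP',
    bounds=gp.free_param_bounds
)
gp.update_hyperparameters(res.x)  # leave the GP in the optimized state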
def compute_K_L_alpha_ll(self):
r"""Compute `K`, `L`, `alpha` and log-likelihood according to the first part of Algorithm 2.1 in R&W.
Computes `K` and the noise portion of `K` using :py:meth:`compute_Kij`,
computes `L` using :py:func:`scipy.linalg.cholesky`, then computes
`alpha` as `L.T\\(L\\y)`.
Only does the computation if :py:attr:`K_up_to_date` is False --
otherwise leaves the existing values.
"""
if not self.K_up_to_date:
y = self.y
err_y = self.err_y
self.K = self.compute_Kij(self.X, None, self.n, None, noise=False)
# If the noise kernel is meant to be strictly diagonal, it should
# yield a diagonal noise_K:
if isinstance(self.noise_k, ZeroKernel):
self.noise_K = scipy.zeros((self.X.shape[0], self.X.shape[0]))
elif isinstance(self.noise_k, DiagonalNoiseKernel):
self.noise_K = self.noise_k.params[0]**2.0 * scipy.eye(self.X.shape[0])
else:
self.noise_K = self.compute_Kij(self.X, None, self.n, None, noise=True)
K = self.K
noise_K = self.noise_K
if self.T is not None:
KnK = self.T.dot(K + noise_K).dot(self.T.T)
else:
KnK = K + noise_K
K_tot = (
KnK +
scipy.diag(err_y**2.0) +
self.diag_factor * sys.float_info.epsilon * scipy.eye(len(y))
)
self.L = scipy.linalg.cholesky(K_tot, lower=True)
# Need to make the mean-subtracted y that appears in the expression
# for alpha:
if self.mu is not None:
mu_alph = self.mu(self.X, self.n)
if self.T is not None:
mu_alph = self.T.dot(mu_alph)
y_alph = self.y - mu_alph
else:
y_alph = self.y
self.alpha = scipy.linalg.cho_solve((self.L, True), scipy.atleast_2d(y_alph).T)
self.ll = (
-0.5 * scipy.atleast_2d(y_alph).dot(self.alpha) -
scipy.log(scipy.diag(self.L)).sum() -
0.5 * len(y) * scipy.log(2.0 * scipy.pi)
)[0, 0]
# Apply hyperpriors:
self.ll += self.hyperprior(self.params)
if self.use_hyper_deriv:
warnings.warn("Use of hyperparameter derivatives is experimental!")
# Only compute for the free parameters, since that is what we
# want to optimize:
self.ll_deriv = scipy.zeros(len(self.free_params))
# Combine the kernel and noise kernel so we only need one loop:
if isinstance(self.noise_k, ZeroKernel):
knk = self.k
elif isinstance(self.noise_k, DiagonalNoiseKernel):
knk = self.k
# Handle DiagonalNoiseKernel specially:
if not self.noise_k.fixed_params[0]:
dK_dtheta_i = 2.0 * self.noise_k.params[0] * scipy.eye(len(y))
self.ll_deriv[len(self.k.free_params)] = 0.5 * (
self.alpha.T.dot(dK_dtheta_i.dot(self.alpha)) -
scipy.trace(scipy.linalg.cho_solve((self.L, True), dK_dtheta_i))
)
else:
knk = self.k + self.noise_k
# Get the indices of the free params in knk.params:
free_param_idxs = scipy.arange(0, len(knk.params), dtype=int)[~knk.fixed_params]
# Handle the kernel and noise kernel:
for i, pi in enumerate(free_param_idxs):
dK_dtheta_i = self.compute_Kij(
self.X, None, self.n, None, k=knk, hyper_deriv=pi
)
if self.T is not None:
dK_dtheta_i = self.T.dot(dK_dtheta_i).dot(self.T.T)
self.ll_deriv[i] = 0.5 * (
self.alpha.T.dot(dK_dtheta_i.dot(self.alpha)) -
scipy.trace(scipy.linalg.cho_solve((self.L, True), dK_dtheta_i))
)
# Handle the mean function:
if self.mu is not None:
# Get the indices of the free params in self.mu.params:
free_param_idxs = scipy.arange(0, len(self.mu.params), dtype=int)[~self.mu.fixed_params]
for i, pi in enumerate(free_param_idxs):
dmu_dtheta_i = scipy.atleast_2d(self.mu(self.X, self.n, hyper_deriv=pi)).T
if self.T is not None:
dmu_dtheta_i = self.T.dot(dmu_dtheta_i)
self.ll_deriv[i + len(knk.free_params)] = dmu_dtheta_i.T.dot(self.alpha)
# Handle the hyperprior:
# Get the indices of the free params in self.params:
free_param_idxs = scipy.arange(0, len(self.params), dtype=int)[~self.fixed_params]
for i, pi in enumerate(free_param_idxs):
self.ll_deriv[i] += self.hyperprior(self.params, hyper_deriv=pi)
self.K_up_to_date = True
|
r"""Compute `K`, `L`, `alpha` and log-likelihood according to the first part of Algorithm 2.1 in R&W.
Computes `K` and the noise portion of `K` using :py:meth:`compute_Kij`,
computes `L` using :py:func:`scipy.linalg.cholesky`, then computes
`alpha` as `L.T\\(L\\y)`.
Only does the computation if :py:attr:`K_up_to_date` is False --
otherwise leaves the existing values.
|
entailment
|
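The core computation is the first part of Algorithm 2.1 in Rasmussen & Williams. A self-contained numpy/scipy sketch with toy data and kernel (both are assumptions for illustration):

import numpy
import scipy.linalg

x = numpy.linspace(0.0, 1.0, 20)
y = numpy.sin(2.0 * numpy.pi * x)
err_y = 0.1 * numpy.ones_like(y)
# Toy squared-exponential kernel matrix plus the measurement noise:
K = numpy.exp(-(x[:, None] - x[None, :])**2 / (2.0 * 0.2**2))
K_tot = K + numpy.diag(err_y**2)
# K_tot = L L^T, then alpha = K_tot^{-1} y via triangular solves:
L = scipy.linalg.cholesky(K_tot, lower=True)
alpha = scipy.linalg.cho_solve((L, True), y[:, None])
# Log marginal likelihood, using log|K_tot| = 2*sum(log(diag(L))):
ll = (
    -0.5 * y.dot(alpha)[0] -
    numpy.log(numpy.diag(L)).sum() -
    0.5 * len(y) * numpy.log(2.0 * numpy.pi)
)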
def compute_Kij(self, Xi, Xj, ni, nj, noise=False, hyper_deriv=None, k=None):
r"""Compute covariance matrix between datasets `Xi` and `Xj`.
Specify the orders of derivatives at each location with the `ni`, `nj`
arrays. The `noise` flag selects whether the noise kernel is used, i.e.,
for evaluation of :math:`K+\sigma I` versus :math:`K_*`.
If `Xj` is None, the symmetric matrix :math:`K(X, X)` is formed.
Note that type and dimension checking is NOT performed, as it is assumed
the data are from inside the instance and have hence been sanitized by
:py:meth:`add_data`.
Parameters
----------
Xi : array, (`M`, `D`)
`M` input values of dimension `D`.
Xj : array, (`P`, `D`)
`P` input values of dimension `D`.
ni : array, (`M`, `D`), non-negative integers
`M` derivative orders with respect to the `Xi` coordinates.
nj : array, (`P`, `D`), non-negative integers
`P` derivative orders with respect to the `Xj` coordinates.
noise : bool, optional
If True, uses the noise kernel, otherwise uses the regular kernel.
Default is False (use regular kernel).
hyper_deriv : None or non-negative int, optional
Index of the hyperparameter to compute the first derivative with
respect to. If None, no derivatives are taken. Default is None (no
hyperparameter derivatives).
k : :py:class:`~gptools.kernel.core.Kernel` instance, optional
The covariance kernel to use. Overrides `noise` if present.
Returns
-------
Kij : array, (`M`, `P`)
Covariance matrix between `Xi` and `Xj`.
"""
if k is None:
if not noise:
k = self.k
else:
k = self.noise_k
if Xj is None:
symmetric = True
Xj = Xi
nj = ni
else:
symmetric = False
# TODO: This technically doesn't take advantage of the symmetric case.
# Might be worth trying to do that at some point, but this is vastly
# superior to the double for loop implementation for which using
# symmetry is easy.
Xi_tile = scipy.repeat(Xi, Xj.shape[0], axis=0)
ni_tile = scipy.repeat(ni, Xj.shape[0], axis=0)
Xj_tile = scipy.tile(Xj, (Xi.shape[0], 1))
nj_tile = scipy.tile(nj, (Xi.shape[0], 1))
Kij = k(
Xi_tile,
Xj_tile,
ni_tile,
nj_tile,
hyper_deriv=hyper_deriv,
symmetric=symmetric
)
Kij = scipy.reshape(Kij, (Xi.shape[0], -1))
return Kij
|
r"""Compute covariance matrix between datasets `Xi` and `Xj`.
Specify the orders of derivatives at each location with the `ni`, `nj`
arrays. The `noise` flag selects whether the noise kernel is used, i.e.,
for evaluation of :math:`K+\sigma I` versus :math:`K_*`.
If `Xj` is None, the symmetric matrix :math:`K(X, X)` is formed.
Note that type and dimension checking is NOT performed, as it is assumed
the data are from inside the instance and have hence been sanitized by
:py:meth:`add_data`.
Parameters
----------
Xi : array, (`M`, `D`)
`M` input values of dimension `D`.
Xj : array, (`P`, `D`)
`P` input values of dimension `D`.
ni : array, (`M`, `D`), non-negative integers
`M` derivative orders with respect to the `Xi` coordinates.
nj : array, (`P`, `D`), non-negative integers
`P` derivative orders with respect to the `Xj` coordinates.
noise : bool, optional
If True, uses the noise kernel, otherwise uses the regular kernel.
Default is False (use regular kernel).
hyper_deriv : None or non-negative int, optional
Index of the hyperparameter to compute the first derivative with
respect to. If None, no derivatives are taken. Default is None (no
hyperparameter derivatives).
k : :py:class:`~gptools.kernel.core.Kernel` instance, optional
The covariance kernel to use. Overrides `noise` if present.
Returns
-------
Kij : array, (`M`, `P`)
Covariance matrix between `Xi` and `Xj`.
|
entailment
|
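The repeat/tile construction above vectorizes the double loop over all (i, j) pairs: each row of Xi is repeated P times while the whole Xj block is tiled M times, so a single vectorized kernel call evaluates all M*P pairs at once. A self-contained illustration with a plain function in place of the kernel object:

import numpy

def sq_exp(xi, xj):
    # Toy stand-in for the kernel call:
    return numpy.exp(-(xi - xj)**2 / 2.0)

Xi = numpy.array([[0.0], [1.0], [2.0]])  # M = 3 points
Xj = numpy.array([[0.5], [1.5]])         # P = 2 points
Xi_tile = numpy.repeat(Xi, Xj.shape[0], axis=0)  # each Xi row, P times
Xj_tile = numpy.tile(Xj, (Xi.shape[0], 1))       # the Xj block, M times
Kij = sq_exp(Xi_tile[:, 0], Xj_tile[:, 0]).reshape((Xi.shape[0], -1))
# Kij[i, j] == sq_exp(Xi[i], Xj[j]), shape (M, P)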
def compute_ll_matrix(self, bounds, num_pts):
"""Compute the log likelihood over the (free) parameter space.
Parameters
----------
bounds : 2-tuple or list of 2-tuples with length equal to the number of free parameters
Bounds on the range to use for each of the parameters. If a single
2-tuple is given, it will be used for each of the parameters.
num_pts : int or list of ints with length equal to the number of free parameters
If a single int is given, it will be used for each of the parameters.
Returns
-------
ll_vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities.
param_vals : List of :py:class:`Array`
The parameter values used.
"""
present_free_params = self.free_params[:]
bounds = scipy.atleast_2d(scipy.asarray(bounds, dtype=float))
if bounds.shape[1] != 2:
raise ValueError("Argument bounds must have shape (n, 2)!")
# If bounds is a single tuple, repeat it for each free parameter:
if bounds.shape[0] == 1:
bounds = scipy.tile(bounds, (len(present_free_params), 1))
# If num_pts is a single value, use it for all of the parameters:
try:
iter(num_pts)
except TypeError:
num_pts = num_pts * scipy.ones(bounds.shape[0], dtype=int)
else:
num_pts = scipy.asarray(num_pts, dtype=int)
if len(num_pts) != len(present_free_params):
raise ValueError(
"Length of num_pts must match the number of free parameters!"
)
# Form arrays to evaluate parameters over:
param_vals = []
for k in xrange(0, len(present_free_params)):
param_vals.append(scipy.linspace(bounds[k, 0], bounds[k, 1], num_pts[k]))
ll_vals = self._compute_ll_matrix(0, param_vals, num_pts)
# Reset the parameters to what they were before:
self.update_hyperparameters(scipy.asarray(present_free_params, dtype=float))
return (ll_vals, param_vals)
|
Compute the log likelihood over the (free) parameter space.
Parameters
----------
bounds : 2-tuple or list of 2-tuples with length equal to the number of free parameters
Bounds on the range to use for each of the parameters. If a single
2-tuple is given, it will be used for each of the parameters.
num_pts : int or list of ints with length equal to the number of free parameters
If a single int is given, it will be used for each of the parameters.
Returns
-------
ll_vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities.
param_vals : List of :py:class:`Array`
The parameter values used.
|
entailment
|
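A usage sketch for a brute-force scan over two free hyperparameters, assuming a fitted `gp`; the bounds and grid sizes are illustrative:

# Same bounds for both free parameters, 50 grid points each:
ll_vals, param_vals = gp.compute_ll_matrix((0.1, 10.0), 50)  # shape (50, 50)
# Separate bounds and resolutions per parameter:
ll_vals, param_vals = gp.compute_ll_matrix(
    [(0.1, 10.0), (0.01, 1.0)], [50, 25]
)  # shape (50, 25); param_vals holds the two grid axes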
def _compute_ll_matrix(self, idx, param_vals, num_pts):
"""Recursive helper function for compute_ll_matrix.
Parameters
----------
idx : int
The index of the parameter for this layer of the recursion to
work on. `idx` == len(`num_pts`) is the base case that terminates
the recursion.
param_vals : List of :py:class:`Array`
List of arrays of parameter values. Entries in the slots 0:`idx` are
set to scalars by the previous levels of recursion.
num_pts : :py:class:`Array`
The numbers of points for each parameter.
Returns
-------
vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities at lower
levels.
"""
if idx >= len(num_pts):
# Base case: All entries in param_vals should be scalars:
return -1.0 * self.update_hyperparameters(
scipy.asarray(param_vals, dtype=float)
)
else:
# Recursive case: call _compute_ll_matrix for each entry in param_vals[idx]:
vals = scipy.zeros(num_pts[idx:], dtype=float)
for k in xrange(0, len(param_vals[idx])):
specific_param_vals = list(param_vals)
specific_param_vals[idx] = param_vals[idx][k]
vals[k] = self._compute_ll_matrix(
idx + 1,
specific_param_vals,
num_pts
)
return vals
|
Recursive helper function for compute_ll_matrix.
Parameters
----------
idx : int
The index of the parameter for this layer of the recursion to
work on. `idx` == len(`num_pts`) is the base case that terminates
the recursion.
param_vals : List of :py:class:`Array`
List of arrays of parameter values. Entries in the slots 0:`idx` are
set to scalars by the previous levels of recursion.
num_pts : :py:class:`Array`
The numbers of points for each parameter.
Returns
-------
vals : :py:class:`Array`
The log likelihood for each of the parameter possibilities at lower
levels.
|
entailment
|
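The recursion enumerates the Cartesian product of the per-parameter grids. An equivalent non-recursive sketch using itertools.product, with a toy log-likelihood standing in for -1 * update_hyperparameters(...):

import itertools
import numpy

def toy_ll(params):
    # Toy stand-in for the log likelihood at a parameter combination:
    return -((params[0] - 1.0)**2 + (params[1] - 2.0)**2)

param_vals = [numpy.linspace(0.0, 2.0, 5), numpy.linspace(1.0, 3.0, 4)]
num_pts = [len(v) for v in param_vals]
# product iterates the last axis fastest, matching a C-order reshape:
vals = numpy.array(
    [toy_ll(p) for p in itertools.product(*param_vals)]
).reshape(num_pts)
# vals[i, j] is the value at (param_vals[0][i], param_vals[1][j])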
def sample_hyperparameter_posterior(self, nwalkers=200, nsamp=500, burn=0,
thin=1, num_proc=None, sampler=None,
plot_posterior=False,
plot_chains=False, sampler_type='ensemble',
ntemps=20, sampler_a=2.0, **plot_kwargs):
"""Produce samples from the posterior for the hyperparameters using MCMC.
Returns the sampler created, because storing it stops the GP from being
pickleable. To add more samples to a previous sampler, pass the sampler
instance in the `sampler` keyword.
Parameters
----------
nwalkers : int, optional
The number of walkers to use in the sampler. Should be on the order
of several hundred. Default is 200.
nsamp : int, optional
Number of samples (per walker) to take. Default is 500.
burn : int, optional
This keyword only has an effect on the corner plot produced when
`plot_posterior` is True and the flattened chain plot produced
when `plot_chains` is True. To perform computations with burn-in,
see :py:meth:`compute_from_MCMC`. The number of samples to discard
at the beginning of the chain. Default is 0.
thin : int, optional
This keyword only has an effect on the corner plot produced when
`plot_posterior` is True and the flattened chain plot produced
when `plot_chains` is True. To perform computations with thinning,
see :py:meth:`compute_from_MCMC`. Every `thin`-th sample is kept.
Default is 1.
num_proc : int or None, optional
Number of processors to use. If None, all available processors are
used. Default is None (use all available processors).
sampler : :py:class:`Sampler` instance
The sampler to use. If the sampler already has samples, the most
recent sample will be used as the starting point. Otherwise a
random sample from the hyperprior will be used.
plot_posterior : bool, optional
If True, a corner plot of the posterior for the hyperparameters
will be generated. Default is False.
plot_chains : bool, optional
If True, a plot showing the history and autocorrelation of the
chains will be produced.
sampler_type : str, optional
The type of sampler to use. Valid options are "ensemble" (affine-
invariant ensemble sampler) and "pt" (parallel-tempered ensemble
sampler).
ntemps : int, optional
Number of temperatures to use with the parallel-tempered ensemble
sampler.
sampler_a : float, optional
Scale of the proposal distribution.
plot_kwargs : additional keywords, optional
Extra arguments to pass to :py:func:`~gptools.utils.plot_sampler`.
"""
if num_proc is None:
num_proc = multiprocessing.cpu_count()
# Needed for emcee to do it right:
if num_proc == 0:
num_proc = 1
ndim = len(self.free_params)
if sampler is None:
if sampler_type == 'ensemble':
sampler = emcee.EnsembleSampler(
nwalkers,
ndim,
_ComputeLnProbEval(self),
threads=num_proc,
a=sampler_a
)
elif sampler_type == 'pt':
# TODO: Finish this!
raise NotImplementedError("PTSampler not done yet!")
sampler = emcee.PTSampler(
ntemps,
nwalkers,
ndim,
logl,
logp
)
else:
raise NotImplementedError(
"Sampler type %s not supported!" % (sampler_type,)
)
else:
sampler.a = sampler_a
if sampler.chain.size == 0:
theta0 = self.hyperprior.random_draw(size=nwalkers).T
theta0 = theta0[:, ~self.fixed_params]
else:
# Start from the stopping point of the previous chain:
theta0 = sampler.chain[:, -1, :]
sampler.run_mcmc(theta0, nsamp)
if plot_posterior or plot_chains:
flat_trace = sampler.chain[:, burn::thin, :]
flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
if plot_posterior and plot_chains:
plot_sampler(
sampler,
labels=['$%s$' % (l,) for l in self.free_param_names],
burn=burn,
**plot_kwargs
)
else:
if plot_posterior:
triangle.corner(
flat_trace,
plot_datapoints=False,
labels=['$%s$' % (l,) for l in self.free_param_names]
)
if plot_chains:
f = plt.figure()
for k in xrange(0, ndim):
# a = f.add_subplot(3, ndim, k + 1)
# a.acorr(
# sampler.flatchain[:, k],
# maxlags=100,
# detrend=plt.mlab.detrend_mean
# )
# a.set_xlabel('lag')
# a.set_title('$%s$ autocorrelation' % (self.free_param_names[k],))
a = f.add_subplot(ndim, 1, 0 * ndim + k + 1)
for chain in sampler.chain[:, :, k]:
a.plot(chain)
a.set_xlabel('sample')
a.set_ylabel('$%s$' % (self.free_param_names[k],))
a.set_title('$%s$ all chains' % (self.free_param_names[k],))
a.axvline(burn, color='r', linewidth=3, ls='--')
# a = f.add_subplot(2, ndim, 1 * ndim + k + 1)
# a.plot(flat_trace[:, k])
# a.set_xlabel('sample')
# a.set_ylabel('$%s$' % (self.free_param_names[k],))
# a.set_title('$%s$ flattened, burned and thinned chain' % (self.free_param_names[k],))
# Print a summary of the sampler:
print("MCMC parameter summary:")
print("param\tmean\t95% posterior interval")
mean, ci_l, ci_u = summarize_sampler(sampler, burn=burn)
names = self.free_param_names[:]
for n, m, l, u in zip(names, mean, ci_l, ci_u):
print("%s\t%4.4g\t[%4.4g, %4.4g]" % (n, m, l, u))
return sampler
|
Produce samples from the posterior for the hyperparameters using MCMC.
Returns the sampler created, because storing it stops the GP from being
pickleable. To add more samples to a previous sampler, pass the sampler
instance in the `sampler` keyword.
Parameters
----------
nwalkers : int, optional
The number of walkers to use in the sampler. Should be on the order
of several hundred. Default is 200.
nsamp : int, optional
Number of samples (per walker) to take. Default is 500.
burn : int, optional
This keyword only has an effect on the corner plot produced when
`plot_posterior` is True and the flattened chain plot produced
when `plot_chains` is True. To perform computations with burn-in,
see :py:meth:`compute_from_MCMC`. The number of samples to discard
at the beginning of the chain. Default is 0.
thin : int, optional
This keyword only has an effect on the corner plot produced when
`plot_posterior` is True and the flattened chain plot produced
when `plot_chains` is True. To perform computations with thinning,
see :py:meth:`compute_from_MCMC`. Every `thin`-th sample is kept.
Default is 1.
num_proc : int or None, optional
Number of processors to use. If None, all available processors are
used. Default is None (use all available processors).
sampler : :py:class:`Sampler` instance
The sampler to use. If the sampler already has samples, the most
recent sample will be used as the starting point. Otherwise a
random sample from the hyperprior will be used.
plot_posterior : bool, optional
If True, a corner plot of the posterior for the hyperparameters
will be generated. Default is False.
plot_chains : bool, optional
If True, a plot showing the history and autocorrelation of the
chains will be produced.
sampler_type : str, optional
The type of sampler to use. Valid options are "ensemble" (affine-
invariant ensemble sampler) and "pt" (parallel-tempered ensemble
sampler).
ntemps : int, optional
Number of temperatures to use with the parallel-tempered ensemble
sampler.
sampler_a : float, optional
Scale of the proposal distribution.
plot_kwargs : additional keywords, optional
Extra arguments to pass to :py:func:`~gptools.utils.plot_sampler`.
|
entailment
|
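A minimal usage sketch for the method above; `gp` is a hypothetical, already-fitted gptools GaussianProcess and is not part of the source:

# Hedged sketch: `gp` is assumed to be a gptools.GaussianProcess with
# data already attached via add_data().
sampler = gp.sample_hyperparameter_posterior(nwalkers=200, nsamp=500)
# The sampler is returned (not stored) so the GP stays pickleable;
# pass it back in to extend the chain from its last position:
sampler = gp.sample_hyperparameter_posterior(nsamp=250, sampler=sampler)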
def compute_from_MCMC(self, X, n=0, return_mean=True, return_std=True,
return_cov=False, return_samples=False,
return_mean_func=False, num_samples=1, noise=False,
samp_kwargs={}, sampler=None, flat_trace=None, burn=0,
thin=1, **kwargs):
"""Compute desired quantities from MCMC samples of the hyperparameter posterior.
The return will be a list with a number of rows equal to the number of
hyperparameter samples. The columns depend on the state of the boolean
flags, but will be some subset of (mean, stddev, cov, samples), in that
order. Samples will be the raw output of :py:meth:`draw_sample`, so you
will need to remember to convert to an array and flatten if you want to
work with a single sample.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
n : non-negative int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
return_mean : bool, optional
If True, the mean will be computed at each hyperparameter sample.
Default is True (compute mean).
return_std : bool, optional
If True, the standard deviation will be computed at each
hyperparameter sample. Default is True (compute stddev).
return_cov : bool, optional
If True, the covariance matrix will be computed at each
            hyperparameter sample. Default is False (do not compute the covariance).
return_samples : bool, optional
If True, random sample(s) will be computed at each hyperparameter
sample. Default is False (do not compute samples).
num_samples : int, optional
            Compute this many samples if `return_samples` is True. Default is 1.
noise : bool, optional
If True, noise is included in the predictions and samples. Default
is False (do not include noise).
samp_kwargs : dict, optional
            If `return_samples` is True, the contents of this dictionary will be
passed as kwargs to :py:meth:`draw_sample`.
sampler : :py:class:`Sampler` instance or None, optional
:py:class:`Sampler` instance that has already been run to the extent
desired on the hyperparameter posterior. If None, a new sampler will
be created with :py:meth:`sample_hyperparameter_posterior`. In this
case, all extra kwargs will be passed on, allowing you to set the
number of samples, etc. Default is None (create sampler).
flat_trace : array-like (`nsamp`, `ndim`) or None, optional
Flattened trace with samples of the free hyperparameters. If present,
overrides `sampler`. This allows you to use a sampler other than the
ones from :py:mod:`emcee`, or to specify arbitrary values you wish
to evaluate the curve at. Note that this WILL be thinned and burned
according to the following two kwargs. "Flat" refers to the fact
that you must have combined all chains into a single one. Default is
None (use `sampler`).
burn : int, optional
The number of samples to discard at the beginning of the chain.
Default is 0.
thin : int, optional
Every `thin`-th sample is kept. Default is 1.
num_proc : int, optional
The number of processors to use for evaluation. This is used both
when calling the sampler and when evaluating the Gaussian process.
If None, the number of available processors will be used. If zero,
            evaluation will proceed in serial. Default is to use all available
processors.
**kwargs : extra optional kwargs
All additional kwargs are passed to
:py:meth:`sample_hyperparameter_posterior`.
Returns
-------
out : dict
A dictionary having some or all of the fields 'mean', 'std', 'cov'
and 'samp'. Each entry is a list of array-like. The length of this
list is equal to the number of hyperparameter samples used, and the
entries have the following shapes:
==== ====================
mean (`M`,)
std (`M`,)
cov (`M`, `M`)
samp (`M`, `num_samples`)
==== ====================
"""
output_transform = kwargs.pop('output_transform', None)
if flat_trace is None:
if sampler is None:
sampler = self.sample_hyperparameter_posterior(burn=burn, **kwargs)
# If we create the sampler, we need to make sure we clean up its pool:
try:
sampler.pool.close()
except AttributeError:
# This will occur if only one thread is used.
pass
flat_trace = sampler.chain[:, burn::thin, :]
flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
else:
flat_trace = flat_trace[burn::thin, :]
        num_proc = kwargs.get('num_proc', None)
        if num_proc is None:
            num_proc = multiprocessing.cpu_count()
if num_proc > 1:
pool = InterruptiblePool(processes=num_proc)
map_fun = pool.map
else:
            # Materialize the result so it can be iterated repeatedly below:
            map_fun = lambda f, seq: list(map(f, seq))
try:
res = map_fun(
_ComputeGPWrapper(
self,
X,
n,
return_mean,
return_std,
return_cov,
return_samples,
return_mean_func,
num_samples,
noise,
samp_kwargs,
output_transform
),
flat_trace
)
finally:
if num_proc > 1:
pool.close()
out = dict()
if return_mean:
out['mean'] = [r['mean'] for r in res if r is not None]
if return_std:
out['std'] = [r['std'] for r in res if r is not None]
if return_cov:
out['cov'] = [r['cov'] for r in res if r is not None]
if return_samples:
out['samp'] = [r['samp'] for r in res if r is not None]
if return_mean_func and self.mu is not None:
out['mean_func'] = [r['mean_func'] for r in res if r is not None]
out['cov_func'] = [r['cov_func'] for r in res if r is not None]
out['std_func'] = [r['std_func'] for r in res if r is not None]
out['mean_without_func'] = [r['mean_without_func'] for r in res if r is not None]
out['cov_without_func'] = [r['cov_without_func'] for r in res if r is not None]
out['std_without_func'] = [r['std_without_func'] for r in res if r is not None]
return out
|
Compute desired quantities from MCMC samples of the hyperparameter posterior.
The return will be a list with a number of rows equal to the number of
hyperparameter samples. The columns depend on the state of the boolean
flags, but will be some subset of (mean, stddev, cov, samples), in that
order. Samples will be the raw output of :py:meth:`draw_sample`, so you
will need to remember to convert to an array and flatten if you want to
work with a single sample.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
n : non-negative int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
return_mean : bool, optional
If True, the mean will be computed at each hyperparameter sample.
Default is True (compute mean).
return_std : bool, optional
If True, the standard deviation will be computed at each
hyperparameter sample. Default is True (compute stddev).
return_cov : bool, optional
If True, the covariance matrix will be computed at each
    hyperparameter sample. Default is False (do not compute the covariance).
return_samples : bool, optional
If True, random sample(s) will be computed at each hyperparameter
sample. Default is False (do not compute samples).
num_samples : int, optional
    Compute this many samples if `return_samples` is True. Default is 1.
noise : bool, optional
If True, noise is included in the predictions and samples. Default
is False (do not include noise).
samp_kwargs : dict, optional
    If `return_samples` is True, the contents of this dictionary will be
passed as kwargs to :py:meth:`draw_sample`.
sampler : :py:class:`Sampler` instance or None, optional
:py:class:`Sampler` instance that has already been run to the extent
desired on the hyperparameter posterior. If None, a new sampler will
be created with :py:meth:`sample_hyperparameter_posterior`. In this
case, all extra kwargs will be passed on, allowing you to set the
number of samples, etc. Default is None (create sampler).
flat_trace : array-like (`nsamp`, `ndim`) or None, optional
Flattened trace with samples of the free hyperparameters. If present,
overrides `sampler`. This allows you to use a sampler other than the
ones from :py:mod:`emcee`, or to specify arbitrary values you wish
to evaluate the curve at. Note that this WILL be thinned and burned
according to the following two kwargs. "Flat" refers to the fact
that you must have combined all chains into a single one. Default is
None (use `sampler`).
burn : int, optional
The number of samples to discard at the beginning of the chain.
Default is 0.
thin : int, optional
Every `thin`-th sample is kept. Default is 1.
num_proc : int, optional
The number of processors to use for evaluation. This is used both
when calling the sampler and when evaluating the Gaussian process.
If None, the number of available processors will be used. If zero,
    evaluation will proceed in serial. Default is to use all available
processors.
**kwargs : extra optional kwargs
All additional kwargs are passed to
:py:meth:`sample_hyperparameter_posterior`.
Returns
-------
out : dict
A dictionary having some or all of the fields 'mean', 'std', 'cov'
and 'samp'. Each entry is a list of array-like. The length of this
list is equal to the number of hyperparameter samples used, and the
entries have the following shapes:
==== ====================
mean (`M`,)
std (`M`,)
cov (`M`, `M`)
samp (`M`, `num_samples`)
==== ====================
|
entailment
|
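A hedged usage sketch for compute_from_MCMC, reusing the hypothetical `gp` and `sampler` from the sketch above:

import scipy

# Evaluate the GP on a grid at each hyperparameter sample, discarding
# the first 100 samples of each chain and keeping every 2nd sample:
X_star = scipy.linspace(0.0, 1.0, 50)
res = gp.compute_from_MCMC(
    X_star, n=0, return_mean=True, return_std=True,
    sampler=sampler, burn=100, thin=2
)
# res['mean'] and res['std'] are lists with one (M,) array per
# hyperparameter sample; stack them for further processing:
means = scipy.asarray(res['mean'])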
def compute_l_from_MCMC(self, X, n=0, sampler=None, flat_trace=None, burn=0, thin=1, **kwargs):
"""Compute desired quantities from MCMC samples of the hyperparameter posterior.
The return will be a list with a number of rows equal to the number of
hyperparameter samples. The columns will contain the covariance length
scale function.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
n : non-negative int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
sampler : :py:class:`Sampler` instance or None, optional
:py:class:`Sampler` instance that has already been run to the extent
desired on the hyperparameter posterior. If None, a new sampler will
be created with :py:meth:`sample_hyperparameter_posterior`. In this
case, all extra kwargs will be passed on, allowing you to set the
number of samples, etc. Default is None (create sampler).
flat_trace : array-like (`nsamp`, `ndim`) or None, optional
Flattened trace with samples of the free hyperparameters. If present,
overrides `sampler`. This allows you to use a sampler other than the
ones from :py:mod:`emcee`, or to specify arbitrary values you wish
to evaluate the curve at. Note that this WILL be thinned and burned
according to the following two kwargs. "Flat" refers to the fact
that you must have combined all chains into a single one. Default is
None (use `sampler`).
burn : int, optional
The number of samples to discard at the beginning of the chain.
Default is 0.
thin : int, optional
Every `thin`-th sample is kept. Default is 1.
num_proc : int, optional
The number of processors to use for evaluation. This is used both
when calling the sampler and when evaluating the Gaussian process.
If None, the number of available processors will be used. If zero,
            evaluation will proceed in serial. Default is to use all available
processors.
**kwargs : extra optional kwargs
All additional kwargs are passed to
:py:meth:`sample_hyperparameter_posterior`.
Returns
-------
out : array of float
Length scale function at the indicated points.
"""
if flat_trace is None:
if sampler is None:
sampler = self.sample_hyperparameter_posterior(burn=burn, **kwargs)
# If we create the sampler, we need to make sure we clean up
# its pool:
try:
sampler.pool.close()
except AttributeError:
# This will occur if only one thread is used.
pass
flat_trace = sampler.chain[:, burn::thin, :]
flat_trace = flat_trace.reshape((-1, flat_trace.shape[2]))
else:
flat_trace = flat_trace[burn::thin, :]
        num_proc = kwargs.get('num_proc', None)
        if num_proc is None:
            num_proc = multiprocessing.cpu_count()
if num_proc > 1:
pool = InterruptiblePool(processes=num_proc)
try:
res = pool.map(_ComputeLWrapper(self, X, n), flat_trace)
finally:
pool.close()
else:
            res = list(map(_ComputeLWrapper(self, X, n), flat_trace))
return res
|
Compute desired quantities from MCMC samples of the hyperparameter posterior.
The return will be a list with a number of rows equal to the number of
hyperparameter samples. The columns will contain the covariance length
scale function.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
n : non-negative int or list, optional
The order of derivative to compute. For num_dim=1, this must be an
int. For num_dim=2, this must be a list of ints of length 2.
Default is 0 (don't take derivative).
sampler : :py:class:`Sampler` instance or None, optional
:py:class:`Sampler` instance that has already been run to the extent
desired on the hyperparameter posterior. If None, a new sampler will
be created with :py:meth:`sample_hyperparameter_posterior`. In this
case, all extra kwargs will be passed on, allowing you to set the
number of samples, etc. Default is None (create sampler).
flat_trace : array-like (`nsamp`, `ndim`) or None, optional
Flattened trace with samples of the free hyperparameters. If present,
overrides `sampler`. This allows you to use a sampler other than the
ones from :py:mod:`emcee`, or to specify arbitrary values you wish
to evaluate the curve at. Note that this WILL be thinned and burned
according to the following two kwargs. "Flat" refers to the fact
that you must have combined all chains into a single one. Default is
None (use `sampler`).
burn : int, optional
The number of samples to discard at the beginning of the chain.
Default is 0.
thin : int, optional
Every `thin`-th sample is kept. Default is 1.
num_proc : int, optional
The number of processors to use for evaluation. This is used both
when calling the sampler and when evaluating the Gaussian process.
If None, the number of available processors will be used. If zero,
    evaluation will proceed in serial. Default is to use all available
processors.
**kwargs : extra optional kwargs
All additional kwargs are passed to
:py:meth:`sample_hyperparameter_posterior`.
Returns
-------
out : array of float
Length scale function at the indicated points.
|
entailment
|
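The same pattern applies to the length scale function; a short sketch under the same assumptions (`gp` and `sampler` are the hypothetical objects from the sketches above):

import scipy

# Posterior samples of the covariance length scale function on a grid:
X_star = scipy.linspace(0.0, 1.0, 50)
l_samples = gp.compute_l_from_MCMC(X_star, n=0, sampler=sampler, burn=100)
# One array of length scale values per hyperparameter sample.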
def predict_MCMC(self, X, ddof=1, full_MC=False, rejection_func=None, **kwargs):
"""Make a prediction using MCMC samples.
This is essentially a convenient wrapper of :py:meth:`compute_from_MCMC`,
designed to act more or less interchangeably with :py:meth:`predict`.
Computes the mean of the GP posterior marginalized over the
hyperparameters using iterated expectations. If `return_std` is True,
uses the law of total variance to compute the variance of the GP
posterior marginalized over the hyperparameters. If `return_cov` is True,
uses the law of total covariance to compute the entire covariance of the
GP posterior marginalized over the hyperparameters. If both `return_cov`
and `return_std` are True, then both the covariance matrix and standard
deviation array will be returned.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
ddof : int, optional
The degree of freedom correction to use when computing the variance.
Default is 1 (standard Bessel correction for unbiased estimate).
return_std : bool, optional
If True, the standard deviation is also computed. Default is True.
full_MC : bool, optional
Set to True to compute the mean and covariance matrix using Monte
            Carlo sampling of the posterior. The samples will also be
            included in the output. Default is False (don't use full sampling).
rejection_func : callable, optional
Any samples where this function evaluates False will be rejected,
where it evaluates True they will be kept. Default is None (no
rejection). Only has an effect if `full_MC` is True.
**kwargs : optional kwargs
All additional kwargs are passed directly to
:py:meth:`compute_from_MCMC`.
"""
return_std = kwargs.get('return_std', True)
return_cov = kwargs.get('return_cov', False)
if full_MC:
kwargs['return_mean'] = False
kwargs['return_std'] = False
kwargs['return_cov'] = False
kwargs['return_samples'] = True
else:
kwargs['return_mean'] = True
        return_samples = kwargs.get('return_samples', False)
res = self.compute_from_MCMC(X, **kwargs)
out = {}
if return_samples:
samps = scipy.asarray(scipy.hstack(res['samp']))
if full_MC:
if rejection_func:
good_samps = []
for samp in samps.T:
if rejection_func(samp):
good_samps.append(samp)
if len(good_samps) == 0:
raise ValueError("Did not get any good samples!")
samps = scipy.asarray(good_samps, dtype=float).T
mean = scipy.mean(samps, axis=1)
cov = scipy.cov(samps, rowvar=1, ddof=ddof)
std = scipy.sqrt(scipy.diagonal(cov))
else:
means = scipy.asarray(res['mean'])
mean = scipy.mean(means, axis=0)
# TODO: Allow use of robust estimators!
if 'cov' in res:
covs = scipy.asarray(res['cov'])
cov = scipy.mean(covs, axis=0) + scipy.cov(means, rowvar=0, ddof=ddof)
std = scipy.sqrt(scipy.diagonal(cov))
elif 'std' in res:
                vars_ = scipy.asarray(res['std'])**2
std = scipy.sqrt(scipy.mean(vars_, axis=0) +
scipy.var(means, axis=0, ddof=ddof))
if 'mean_func' in res:
mean_funcs = scipy.asarray(res['mean_func'])
cov_funcs = scipy.asarray(res['cov_func'])
mean_func = scipy.mean(mean_funcs, axis=0)
cov_func = scipy.mean(cov_funcs, axis=0) + scipy.cov(mean_funcs, rowvar=0, ddof=ddof)
std_func = scipy.sqrt(scipy.diagonal(cov_func))
mean_without_funcs = scipy.asarray(res['mean_without_func'])
cov_without_funcs = scipy.asarray(res['cov_without_func'])
mean_without_func = scipy.mean(mean_without_funcs, axis=0)
cov_without_func = (
scipy.mean(cov_without_funcs, axis=0) +
scipy.cov(mean_without_funcs, rowvar=0, ddof=ddof)
)
std_without_func = scipy.sqrt(scipy.diagonal(cov_without_func))
out['mean_func'] = mean_func
out['cov_func'] = cov_func
out['std_func'] = std_func
out['mean_without_func'] = mean_without_func
out['cov_without_func'] = cov_without_func
out['std_without_func'] = std_without_func
out['mean'] = mean
if return_samples:
out['samp'] = samps
if return_std or return_cov:
out['std'] = std
if return_cov:
out['cov'] = cov
return out
|
Make a prediction using MCMC samples.
This is essentially a convenient wrapper of :py:meth:`compute_from_MCMC`,
designed to act more or less interchangeably with :py:meth:`predict`.
Computes the mean of the GP posterior marginalized over the
hyperparameters using iterated expectations. If `return_std` is True,
uses the law of total variance to compute the variance of the GP
posterior marginalized over the hyperparameters. If `return_cov` is True,
uses the law of total covariance to compute the entire covariance of the
GP posterior marginalized over the hyperparameters. If both `return_cov`
and `return_std` are True, then both the covariance matrix and standard
deviation array will be returned.
Parameters
----------
X : array-like (`M`,) or (`M`, `num_dim`)
The values to evaluate the Gaussian process at.
ddof : int, optional
The degree of freedom correction to use when computing the variance.
Default is 1 (standard Bessel correction for unbiased estimate).
return_std : bool, optional
If True, the standard deviation is also computed. Default is True.
full_MC : bool, optional
Set to True to compute the mean and covariance matrix using Monte
    Carlo sampling of the posterior. The samples will also be
    included in the output. Default is False (don't use full sampling).
rejection_func : callable, optional
Any samples where this function evaluates False will be rejected,
where it evaluates True they will be kept. Default is None (no
rejection). Only has an effect if `full_MC` is True.
**kwargs : optional kwargs
All additional kwargs are passed directly to
:py:meth:`compute_from_MCMC`.
|
entailment
|
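The non-full_MC branch above combines per-sample moments using the law of total variance; a self-contained numpy sketch of the same combination (the numbers are illustrative only):

import numpy as np

# Per-hyperparameter-sample predictive means and stddevs at M points,
# shaped like res['mean'] and res['std'] (illustrative values):
means = np.array([[1.0, 2.0], [1.2, 1.8], [0.8, 2.2]])  # (n_samp, M)
stds = np.array([[0.3, 0.4], [0.2, 0.5], [0.4, 0.3]])   # (n_samp, M)

mean = means.mean(axis=0)  # iterated expectations
# Law of total variance: Var[y] = E[Var[y|theta]] + Var[E[y|theta]]
std = np.sqrt((stds**2).mean(axis=0) + means.var(axis=0, ddof=1))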
def build_parser():
"Build an argparse argument parser to parse the command line."
parser = argparse.ArgumentParser(
description="""Coursera OAuth2 client CLI. This tool
helps users of the Coursera App Platform to programmatically access
Coursera APIs.""",
epilog="""Please file bugs on github at:
https://github.com/coursera/courseraoauth2client/issues. If you
would like to contribute to this tool's development, check us out at:
https://github.com/coursera/courseraoauth2client""")
parser.add_argument('-c', '--config', help='the configuration file to use')
utils.add_logging_parser(parser)
# We support multiple subcommands. These subcommands have their own
# subparsers. Each subcommand should set a default value for the 'func'
# option. We then call the parsed 'func' function, and execution carries on
# from there.
subparsers = parser.add_subparsers()
commands.config.parser(subparsers)
commands.version.parser(subparsers)
return parser
|
Build an argparse argument parser to parse the command line.
|
entailment
|
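The comment in build_parser describes the standard argparse dispatch idiom; a self-contained sketch of that pattern (the `greet` subcommand is invented for illustration):

import argparse

def greet(args):
    print("Hello, %s!" % args.name)

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
greet_parser = subparsers.add_parser('greet')
greet_parser.add_argument('name')
greet_parser.set_defaults(func=greet)  # each subcommand sets 'func'

args = parser.parse_args(['greet', 'world'])
args.func(args)  # dispatch: prints "Hello, world!"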
def main():
"Boots up the command line tool"
logging.captureWarnings(True)
args = build_parser().parse_args()
# Configure logging
args.setup_logging(args)
# Dispatch into the appropriate subcommand function.
try:
return args.func(args)
except SystemExit:
raise
except:
logging.exception('Problem when running command. Sorry!')
sys.exit(1)
|
Boots up the command line tool
|
entailment
|
def sponsor_menu(
root_menu, menu="sponsors", label=_("Sponsors"),
sponsors_item=_("Our sponsors"),
packages_item=_("Sponsorship packages")):
"""Add sponsor menu links."""
root_menu.add_menu(menu, label, items=[])
for sponsor in (
Sponsor.objects.all()
.order_by('packages', 'order', 'id')
.prefetch_related('packages')):
symbols = sponsor.symbols()
if symbols:
item_name = u"» %s %s" % (sponsor.name, symbols)
else:
item_name = u"» %s" % (sponsor.name,)
with menu_logger(logger, "sponsor %r" % (sponsor.name,)):
root_menu.add_item(
item_name, sponsor.get_absolute_url(), menu=menu)
if sponsors_item:
with menu_logger(logger, "sponsors page link"):
root_menu.add_item(
sponsors_item, reverse("wafer_sponsors"), menu)
if packages_item:
with menu_logger(logger, "sponsorship package page link"):
root_menu.add_item(
packages_item, reverse("wafer_sponsorship_packages"), menu)
|
Add sponsor menu links.
|
entailment
|
def objectatrib(instance, atrib):
'''
    This filter is useful to execute an object method or to get an
    object attribute dynamically. It takes into account that the atrib
    param may contain double underscores as path separators.
'''
atrib = atrib.replace("__", ".")
    atribs = atrib.split(".")
obj = instance
for atrib in atribs:
        if isinstance(obj, dict):
result = obj[atrib]
else:
try:
result = getattr(obj, atrib)()
except Exception:
result = getattr(obj, atrib)
obj = result
return result
|
This filter is useful to execute an object method or to get an
object attribute dynamically. It takes into account that the atrib
param may contain double underscores as path separators.
|
entailment
|
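A sketch of what the filter resolves; the User class is invented for illustration:

class User(object):
    def __init__(self):
        self.profile = {'city': 'Madrid'}

    def get_full_name(self):
        return 'Jane Doe'

u = User()
# Callables found along the path are invoked; dicts are indexed:
objectatrib(u, 'get_full_name')   # -> 'Jane Doe'
objectatrib(u, 'profile__city')   # -> 'Madrid'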
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field.
"""
attrs = attrs or {}
attrs.update(self.form.get_widget_attrs(self))
if hasattr(self.field, 'widget_css_classes'):
css_classes = self.field.widget_css_classes
else:
css_classes = getattr(self.form, 'widget_css_classes', None)
if css_classes:
attrs.update({'class': css_classes})
widget_classes = self.form.fields[self.name].widget.attrs.get('class', None)
if widget_classes:
if attrs.get('class', None):
attrs['class'] += ' ' + widget_classes
else:
attrs.update({'class': widget_classes})
return super(NgBoundField, self).as_widget(widget, attrs, only_initial)
|
Renders the field.
|
entailment
|
def convert_widgets(self):
"""
During form initialization, some widgets have to be replaced by a counterpart suitable to
be rendered the AngularJS way.
"""
for field in self.base_fields.values():
try:
new_widget = field.get_converted_widget()
except AttributeError:
pass
else:
if new_widget:
field.widget = new_widget
|
During form initialization, some widgets have to be replaced by a counterpart suitable to
be rendered the AngularJS way.
|
entailment
|
def epochdate(timestamp):
'''
    Convert an epoch date to a tuple in format ("yyyy-mm-dd","hh:mm:ss")
Example: "1023456427" -> ("2002-06-07","15:27:07")
Parameters:
- `timestamp`: date in epoch format
'''
dt = datetime.fromtimestamp(float(timestamp)).timetuple()
fecha = "{0:d}-{1:02d}-{2:02d}".format(dt.tm_year, dt.tm_mon, dt.tm_mday)
hora = "{0:02d}:{1:02d}:{2:02d}".format(dt.tm_hour, dt.tm_min, dt.tm_sec)
return (fecha, hora)
|
Convert an epoch date to a tuple in format ("yyyy-mm-dd","hh:mm:ss")
Example: "1023456427" -> ("2002-06-07","15:27:07")
Parameters:
- `timestamp`: date in epoch format
|
entailment
|
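A usage sketch matching the docstring example; note that datetime.fromtimestamp uses local time, so the exact strings depend on the machine's timezone:

fecha, hora = epochdate(1023456427)
# In the original locale this yields ('2002-06-07', '15:27:07').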
def model_inspect(obj):
'''
    Analyze itself looking for special information, right now it returns:
- Application name
- Model name
'''
# Prepare the information object
info = {}
if hasattr(obj, '_meta'):
info['verbose_name'] = getattr(obj._meta, 'verbose_name', None)
else:
info['verbose_name'] = None
# Get info from the object
if hasattr(obj, 'model') and obj.model:
model = obj.model
else:
model = obj.__class__
namesp = str(model)
namesp = namesp.replace("<class ", "").replace(">", "").replace("'", "").split(".")
# Remember information
info['appname'] = namesp[-3]
info['modelname'] = namesp[-1]
info['model'] = model
# Return the info
return info
|
Analyze itself looking for special information, right now it returns:
- Application name
- Model name
|
entailment
|
def upload_path(instance, filename):
'''
    This method is created to return the path to upload files. This path must be
    unique for each file to avoid name collisions.
'''
path_separator = "/"
date_separator = "-"
ext_separator = "."
empty_string = ""
# get the model name
model_name = model_inspect(instance)['modelname']
# get the string date
date = datetime.now().strftime("%Y-%m-%d").split(date_separator)
curr_day = date[2]
curr_month = date[1]
curr_year = date[0]
split_filename = filename.split(ext_separator)
filename = empty_string.join(split_filename[:-1])
file_ext = split_filename[-1]
new_filename = empty_string.join([filename, str(random.random()).split(ext_separator)[1]])
new_filename = ext_separator.join([new_filename, file_ext])
string_path = path_separator.join([model_name, curr_year, curr_month, curr_day, new_filename])
# the path is built using the current date and the modelname
return string_path
|
This method is created to return the path to upload files. This path must be
unique for each file to avoid name collisions.
|
entailment
|
def remove_getdisplay(field_name):
'''
    For the string 'get_FIELD_NAME_display' return 'FIELD_NAME'
'''
str_ini = 'get_'
str_end = '_display'
if str_ini == field_name[0:len(str_ini)] and str_end == field_name[(-1) * len(str_end):]:
field_name = field_name[len(str_ini):(-1) * len(str_end)]
return field_name
|
For the string 'get_FIELD_NAME_display' return 'FIELD_NAME'
|
entailment
|
def JSONEncoder_newdefault(kind=('uuid', 'datetime', 'time', 'decimal')):
'''
    JSON Encoder newdefault is a wrapper capable of encoding several kinds
Usage:
from codenerix.helpers import JSONEncoder_newdefault
JSONEncoder_newdefault()
'''
JSONEncoder_olddefault = json.JSONEncoder.default
def JSONEncoder_wrapped(self, o):
        '''
        Wrapped default that handles the extra kinds before falling back
        to the original json.JSONEncoder.default.
        '''
if ('uuid' in kind) and isinstance(o, UUID):
return str(o)
if ('datetime' in kind) and isinstance(o, datetime):
return str(o)
if ('time' in kind) and isinstance(o, time.struct_time):
return datetime.fromtimestamp(time.mktime(o))
if ('decimal' in kind) and isinstance(o, decimal.Decimal):
return str(o)
return JSONEncoder_olddefault(self, o)
json.JSONEncoder.default = JSONEncoder_wrapped
|
JSON Encoder newdefault is a wrapper capable of encoding several kinds
Usage:
from codenerix.helpers import JSONEncoder_newdefault
JSONEncoder_newdefault()
|
entailment
|
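A short usage sketch for the wrapper; after patching, json.dumps handles the listed types directly:

import json
import uuid
from datetime import datetime

JSONEncoder_newdefault()  # patch json.JSONEncoder.default once, at startup
# UUID and datetime values are now rendered via str():
payload = json.dumps({'id': uuid.uuid4(), 'when': datetime.now()})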
def context_processors_update(context, request):
'''
Update context with context_processors from settings
Usage:
from codenerix.helpers import context_processors_update
context_processors_update(context, self.request)
'''
for template in settings.TEMPLATES:
for context_processor in template['OPTIONS']['context_processors']:
path = context_processor.split('.')
name = path.pop(-1)
processor = getattr(importlib.import_module('.'.join(path)), name, None)
if processor:
context.update(processor(request))
return context
|
Update context with context_processors from settings
Usage:
from codenerix.helpers import context_processors_update
context_processors_update(context, self.request)
|
entailment
|
def append(self, filename_in_zip, file_contents):
'''
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
'''
# Set the file pointer to the end of the file
self.in_memory_zip.seek(-1, io.SEEK_END)
# Get a handle to the in-memory zip in append mode
zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
# Write the file to the in-memory zip
zf.writestr(filename_in_zip, file_contents)
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in zf.filelist:
zfile.create_system = 0
# Close the ZipFile
zf.close()
# Rewind the file
self.in_memory_zip.seek(0)
return self
|
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
|
entailment
|
def writetofile(self, filename):
'''Writes the in-memory zip to a file.'''
        # Binary mode: the zip archive is bytes, not text.
        with open(filename, "wb") as f:
            f.write(self.read())
|
Writes the in-memory zip to a file.
|
entailment
|
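A hedged driver for the two methods above, assuming the enclosing class is named InMemoryZip, its __init__ creates an io.BytesIO buffer in self.in_memory_zip, and read() returns the buffer contents (none of which are shown in this excerpt):

imz = InMemoryZip()                        # hypothetical class name
imz.append('hello.txt', 'Hello, zip!')
imz.append('data/values.csv', 'a,b\n1,2\n')
imz.writetofile('bundle.zip')              # writes the archive to disk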
def sponsor_image_url(sponsor, name):
"""Returns the corresponding url from the sponsors images"""
if sponsor.files.filter(name=name).exists():
# We avoid worrying about multiple matches by always
# returning the first one.
return sponsor.files.filter(name=name).first().item.url
return ''
|
Returns the corresponding url from the sponsor's images
|
entailment
|
def sponsor_tagged_image(sponsor, tag):
"""returns the corresponding url from the tagged image list."""
if sponsor.files.filter(tag_name=tag).exists():
return sponsor.files.filter(tag_name=tag).first().tagged_file.item.url
return ''
|
Returns the corresponding url from the tagged image list.
|
entailment
|
def ifusergroup(parser, token):
""" Check to see if the currently logged in user belongs to a specific
group. Requires the Django authentication contrib app and middleware.
Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
{% ifusergroup Admins Clients Sellers %} ... {% else %} ... {% endifusergroup %}
"""
    try:
        tokensp = token.split_contents()
        groups = tokensp[1:]
    except ValueError:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.")
    if not groups:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.")
nodelist_true = parser.parse(('else', 'endifusergroup'))
token = parser.next_token()
if token.contents == 'else':
        nodelist_false = parser.parse(('endifusergroup',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return GroupCheckNode(groups, nodelist_true, nodelist_false)
|
Check to see if the currently logged in user belongs to a specific
group. Requires the Django authentication contrib app and middleware.
Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
{% ifusergroup Admins Clients Sellers %} ... {% else %} ... {% endifusergroup %}
|
entailment
|
def OpenHandle(self):
'''Gets a handle for use with other vSphere Guest API functions. The guest library
handle provides a context for accessing information about the virtual machine.
Virtual machine statistics and state data are associated with a particular guest library
handle, so using one handle does not affect the data associated with another handle.'''
if hasattr(self, 'handle'):
return self.handle
else:
            handle = c_void_p()
            ret = vmGuestLib.VMGuestLib_OpenHandle(byref(handle))
            if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
            # Store the handle so the other methods, which read
            # self.handle, can use it:
            self.handle = handle
            return handle
|
Gets a handle for use with other vSphere Guest API functions. The guest library
handle provides a context for accessing information about the virtual machine.
Virtual machine statistics and state data are associated with a particular guest library
handle, so using one handle does not affect the data associated with another handle.
|
entailment
|
def CloseHandle(self):
'''Releases a handle acquired with VMGuestLib_OpenHandle'''
if hasattr(self, 'handle'):
ret = vmGuestLib.VMGuestLib_CloseHandle(self.handle.value)
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
del(self.handle)
|
Releases a handle acquired with VMGuestLib_OpenHandle
|
entailment
|
def UpdateInfo(self):
'''Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle.'''
ret = vmGuestLib.VMGuestLib_UpdateInfo(self.handle.value)
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
|
Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle.
|
entailment
|
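A sketch of the handle lifecycle these methods implement, assuming the enclosing wrapper class is named VMGuestLib (the class name is not shown in this excerpt):

vm = VMGuestLib()  # hypothetical wrapper class around vmGuestLib
vm.OpenHandle()    # acquire (and store) the guest library handle
vm.UpdateInfo()    # refresh the statistics tied to this handle
print(vm.GetCpuUsedMs(), vm.GetElapsedMs())
vm.CloseHandle()   # release the handle when done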
def GetSessionId(self):
'''Retrieves the VMSessionID for the current session. Call this function after calling
VMGuestLib_UpdateInfo. If VMGuestLib_UpdateInfo has never been called,
VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO.'''
sid = c_void_p()
ret = vmGuestLib.VMGuestLib_GetSessionId(self.handle.value, byref(sid))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return sid
|
Retrieves the VMSessionID for the current session. Call this function after calling
VMGuestLib_UpdateInfo. If VMGuestLib_UpdateInfo has never been called,
VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO.
|
entailment
|
def GetCpuLimitMHz(self):
        '''Retrieves the upper limit of processor use in MHz available to the virtual
machine. For information about setting the CPU limit, see "Limits and
Reservations" on page 14.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetCpuLimitMHz(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the upper limit of processor use in MHz available to the virtual
machine. For information about setting the CPU limit, see "Limits and
Reservations" on page 14.
|
entailment
|
def GetCpuReservationMHz(self):
'''Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14.
|
entailment
|
def GetCpuShares(self):
'''Retrieves the number of CPU shares allocated to the virtual machine. For
information about how an ESX server uses CPU shares to manage virtual
machine priority, see the vSphere Resource Management Guide.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetCpuShares(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the number of CPU shares allocated to the virtual machine. For
information about how an ESX server uses CPU shares to manage virtual
machine priority, see the vSphere Resource Management Guide.
|
entailment
|
def GetCpuStolenMs(self):
'''Retrieves the number of milliseconds that the virtual machine was in a
ready state (able to transition to a run state), but was not scheduled to run.'''
counter = c_uint64()
ret = vmGuestLib.VMGuestLib_GetCpuStolenMs(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the number of milliseconds that the virtual machine was in a
ready state (able to transition to a run state), but was not scheduled to run.
|
entailment
|
def GetCpuUsedMs(self):
'''Retrieves the number of milliseconds during which the virtual machine
has used the CPU. This value includes the time used by the guest
operating system and the time used by virtualization code for tasks for this
virtual machine. You can combine this value with the elapsed time
(VMGuestLib_GetElapsedMs) to estimate the effective virtual machine
CPU speed. This value is a subset of elapsedMs.'''
counter = c_uint64()
ret = vmGuestLib.VMGuestLib_GetCpuUsedMs(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the number of milliseconds during which the virtual machine
has used the CPU. This value includes the time used by the guest
operating system and the time used by virtualization code for tasks for this
virtual machine. You can combine this value with the elapsed time
(VMGuestLib_GetElapsedMs) to estimate the effective virtual machine
CPU speed. This value is a subset of elapsedMs.
|
entailment
|
def GetElapsedMs(self):
'''Retrieves the number of milliseconds that have passed in the virtual
machine since it last started running on the server. The count of elapsed
time restarts each time the virtual machine is powered on, resumed, or
migrated using VMotion. This value counts milliseconds, regardless of
whether the virtual machine is using processing power during that time.
You can combine this value with the CPU time used by the virtual machine
(VMGuestLib_GetCpuUsedMs) to estimate the effective virtual machine
CPU speed. cpuUsedMs is a subset of this value.'''
counter = c_uint64()
ret = vmGuestLib.VMGuestLib_GetElapsedMs(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the number of milliseconds that have passed in the virtual
machine since it last started running on the server. The count of elapsed
time restarts each time the virtual machine is powered on, resumed, or
migrated using VMotion. This value counts milliseconds, regardless of
whether the virtual machine is using processing power during that time.
You can combine this value with the CPU time used by the virtual machine
(VMGuestLib_GetCpuUsedMs) to estimate the effective virtual machine
CPU speed. cpuUsedMs is a subset of this value.
|
entailment
|
def GetHostCpuUsedMs(self):
'''Undocumented.'''
counter = c_uint64()
ret = vmGuestLib.VMGuestLib_GetHostCpuUsedMs(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemKernOvhdMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemKernOvhdMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemMappedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemMappedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemPhysFreeMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemPhysFreeMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemPhysMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemPhysMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemSharedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemSharedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemSwappedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemSwappedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemUnmappedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemUnmappedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostMemUsedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostMemUsedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostNumCpuCores(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostNumCpuCores(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Undocumented.
|
entailment
|
def GetHostProcessorSpeed(self):
'''Retrieves the speed of the ESX system's physical CPU in MHz.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetHostProcessorSpeed(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the speed of the ESX system's physical CPU in MHz.
|
entailment
|