| sentence1 (string, 52-3.87M chars) | sentence2 (string, 1-47.2k chars) | label (1 class: entailment) |
|---|---|---|
def getSpaceUse(self):
"""Get disk space usage.
@return: Dictionary of filesystem space utilization stats for filesystems.
"""
stats = {}
try:
out = subprocess.Popen([dfCmd, "-Pk"],
stdout=subprocess.PIPE).communicate()[0]
except OSError as e:
raise Exception('Execution of command %s failed: %s' % (dfCmd, e))
lines = out.splitlines()
if len(lines) > 1:
for line in lines[1:]:
fsstats = {}
cols = line.split()
fsstats['device'] = cols[0]
fsstats['type'] = self._fstypeDict[cols[5]]
fsstats['total'] = 1024 * int(cols[1])
fsstats['inuse'] = 1024 * int(cols[2])
fsstats['avail'] = 1024 * int(cols[3])
fsstats['inuse_pcent'] = int(cols[4][:-1])
stats[cols[5]] = fsstats
return stats
|
Get disk space usage.
@return: Dictionary of filesystem space utilization stats for filesystems.
|
entailment
|
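For reference, a standalone sketch of the parsing step above, fed one canned df -Pk output line (device, sizes, and mount point are illustrative):

line = '/dev/sda1 41251136 9875472 29259664 26% /'
cols = line.split()
fsstats = {
    'device': cols[0],
    'total': 1024 * int(cols[1]),      # df -Pk reports 1024-byte blocks
    'inuse': 1024 * int(cols[2]),
    'avail': 1024 * int(cols[3]),
    'inuse_pcent': int(cols[4][:-1]),  # strip the trailing '%'
}
print(fsstats)  # the method above keys each entry by cols[5], the mount point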
def retrieveVals(self):
"""Retrieve values for graphs."""
stats = self._dbconn.getDatabaseStats()
databases = stats.get('databases')
totals = stats.get('totals')
if self.hasGraph('pg_connections'):
limit = self._dbconn.getParam('max_connections')
self.setGraphVal('pg_connections', 'max_conn', limit)
for (db, dbstats) in databases.items():
if self.dbIncluded(db):
self.setGraphVal('pg_connections', db,
dbstats['numbackends'])
self.setGraphVal('pg_connections', 'total', totals['numbackends'])
if self.hasGraph('pg_diskspace'):
for (db, dbstats) in databases.items():
if self.dbIncluded(db):
self.setGraphVal('pg_diskspace', db, dbstats['disk_size'])
self.setGraphVal('pg_diskspace', 'total', totals['disk_size'])
if self.hasGraph('pg_blockreads'):
self.setGraphVal('pg_blockreads', 'blk_hit', totals['blks_hit'])
self.setGraphVal('pg_blockreads', 'blk_read', totals['blks_read'])
if self.hasGraph('pg_xact'):
self.setGraphVal('pg_xact', 'commits', totals['xact_commit'])
self.setGraphVal('pg_xact', 'rollbacks', totals['xact_rollback'])
if self.hasGraph('pg_tup_read'):
self.setGraphVal('pg_tup_read', 'fetch', totals['tup_fetched'])
self.setGraphVal('pg_tup_read', 'return', totals['tup_returned'])
if self.hasGraph('pg_tup_write'):
self.setGraphVal('pg_tup_write', 'delete', totals['tup_deleted'])
self.setGraphVal('pg_tup_write', 'update', totals['tup_updated'])
self.setGraphVal('pg_tup_write', 'insert', totals['tup_inserted'])
lock_stats = None
for lock_state in ('all', 'wait',):
graph_name = "pg_lock_%s" % lock_state
if self.hasGraph(graph_name):
if lock_stats is None:
lock_stats = self._dbconn.getLockStatsMode()
mode_iter = iter(PgInfo.lockModes)
for mode in ('AccessExcl', 'Excl', 'ShrRwExcl', 'Shr',
'ShrUpdExcl', 'RwExcl', 'RwShr', 'AccessShr',):
self.setGraphVal(graph_name, mode,
lock_stats[lock_state].get(next(mode_iter)))
stats = None
if self.hasGraph('pg_checkpoints'):
if stats is None:
stats = self._dbconn.getBgWriterStats()
self.setGraphVal('pg_checkpoints', 'req',
stats.get('checkpoints_req'))
self.setGraphVal('pg_checkpoints', 'timed',
stats.get('checkpoints_timed'))
if self.hasGraph('pg_bgwriter'):
if stats is None:
stats = self._dbconn.getBgWriterStats()
self.setGraphVal('pg_bgwriter', 'backend',
stats.get('buffers_backend'))
self.setGraphVal('pg_bgwriter', 'clean',
stats.get('buffers_clean'))
self.setGraphVal('pg_bgwriter', 'chkpoint',
stats.get('buffers_checkpoint'))
if self._detailGraphs:
for (db, dbstats) in databases.items():
if self.dbIncluded(db):
if self.hasGraph('pg_blockread_detail'):
self.setGraphVal('pg_blockread_detail', db,
dbstats['blks_hit'] + dbstats['blks_read'])
for (graph_name, attr_name) in (
('pg_xact_commit_detail', 'xact_commit'),
('pg_xact_rollback_detail', 'xact_rollback'),
('pg_tup_return_detail', 'tup_returned'),
('pg_tup_fetch_detail', 'tup_fetched'),
('pg_tup_delete_detail', 'tup_deleted'),
('pg_tup_update_detail', 'tup_updated'),
('pg_tup_insert_detail', 'tup_inserted'),
):
if self.hasGraph(graph_name):
self.setGraphVal(graph_name, db, dbstats[attr_name])
lock_stats_db = None
for lock_state in ('all', 'wait',):
graph_name = "pg_lock_%s_detail" % lock_state
if self.hasGraph(graph_name):
if lock_stats_db is None:
lock_stats_db = self._dbconn.getLockStatsDB()
self.setGraphVal(graph_name, db,
lock_stats_db[lock_state].get(db, 0))
if self._replGraphs:
repl_stats = self._dbconn.getSlaveConflictStats()
if self.hasGraph('pg_repl_conflicts'):
for field in self.getGraphFieldList('pg_repl_conflicts'):
self.setGraphVal('pg_repl_conflicts', field,
repl_stats['totals'].get("confl_%s" % field))
if self._detailGraphs and self.hasGraph('pg_repl_conflicts_detail'):
for (db, dbstats) in repl_stats['databases'].items():
if self.dbIncluded(db):
self.setGraphVal('pg_repl_conflicts_detail', db,
sum(dbstats.values()))
|
Retrieve values for graphs.
|
entailment
|
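The method above takes care to query each statistics source at most once per run, however many graphs consume it (note the lock_stats is None and stats is None guards). A distilled, runnable sketch of that lazy-fetch pattern; the fetcher and graph names are illustrative, not the plugin API:

calls = {'count': 0}

def fetch_bgwriter_stats():
    calls['count'] += 1
    return {'checkpoints_req': 7, 'buffers_clean': 42}

stats = None
for graph in ('pg_checkpoints', 'pg_bgwriter'):
    if stats is None:  # fetch lazily, on first use
        stats = fetch_bgwriter_stats()
    # ... publish this graph's values from stats ...

print(calls['count'])  # 1 -- a single query served both graphs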
def connect(self, host, port):
"""Connects via a RS-485 to Ethernet adapter."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
self._reader = sock.makefile(mode='rb')
self._writer = sock.makefile(mode='wb')
|
Connects via a RS-485 to Ethernet adapter.
|
entailment
|
def process(self, data_changed_callback):
"""Process data; returns when the reader signals EOF.
Callback is notified when any data changes."""
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
while True:
byte = self._reader.read(1)
while True:
# Search for FRAME_DLE + FRAME_STX
if not byte:
return
if byte[0] == self.FRAME_DLE:
next_byte = self._reader.read(1)
if not next_byte:
return
if next_byte[0] == self.FRAME_STX:
break
else:
continue
byte = self._reader.read(1)
frame = bytearray()
byte = self._reader.read(1)
while True:
if not byte:
return
if byte[0] == self.FRAME_DLE:
# Should be FRAME_ETX or 0 according to
# the AQ-CO-SERIAL manual
next_byte = self._reader.read(1)
if not next_byte:
return
if next_byte[0] == self.FRAME_ETX:
break
elif next_byte[0] != 0:
# Error?
pass
frame.append(byte[0])
byte = self._reader.read(1)
# Verify CRC
frame_crc = int.from_bytes(frame[-2:], byteorder='big')
frame = frame[:-2]
calculated_crc = self.FRAME_DLE + self.FRAME_STX
for byte in frame:
calculated_crc += byte
if frame_crc != calculated_crc:
_LOGGER.warning('Bad CRC')
continue
frame_type = frame[0:2]
frame = frame[2:]
if frame_type == self.FRAME_TYPE_KEEP_ALIVE:
# Keep alive
# If a frame has been queued for transmit, send it.
if not self._send_queue.empty():
data = self._send_queue.get(block=False)
self._writer.write(data['frame'])
self._writer.flush()
_LOGGER.info('Sent: %s', binascii.hexlify(data['frame']))
try:
if data['desired_states'] is not None:
# Set a timer to verify the state changes
# Wait 2 seconds as it can take a while for
# the state to change.
Timer(2.0, self._check_state, [data]).start()
except KeyError:
pass
continue
elif frame_type == self.FRAME_TYPE_KEY_EVENT:
_LOGGER.info('Key: %s', binascii.hexlify(frame))
elif frame_type == self.FRAME_TYPE_LEDS:
_LOGGER.debug('LEDs: %s', binascii.hexlify(frame))
# First 4 bytes are the LEDs that are on;
# second 4 bytes_ are the LEDs that are flashing
states = int.from_bytes(frame[0:4], byteorder='little')
flashing_states = int.from_bytes(frame[4:8],
byteorder='little')
states |= flashing_states
if (states != self._states
or flashing_states != self._flashing_states):
self._states = states
self._flashing_states = flashing_states
data_changed_callback(self)
elif frame_type == self.FRAME_TYPE_PUMP_SPEED_REQUEST:
value = int.from_bytes(frame[0:2], byteorder='big')
_LOGGER.debug('Pump speed request: %d%%', value)
if self._pump_speed != value:
self._pump_speed = value
data_changed_callback(self)
elif frame_type == self.FRAME_TYPE_PUMP_STATUS:
# Pump status messages sent out by Hayward VSP pumps
self._multi_speed_pump = True
speed = frame[2]
# Power is in BCD
power = ((((frame[3] & 0xf0) >> 4) * 1000)
+ (((frame[3] & 0x0f)) * 100)
+ (((frame[4] & 0xf0) >> 4) * 10)
+ (((frame[4] & 0x0f))))
_LOGGER.debug('Pump speed: %d%%, power: %d watts',
speed, power)
if self._pump_power != power:
self._pump_power = power
data_changed_callback(self)
elif frame_type == self.FRAME_TYPE_DISPLAY_UPDATE:
parts = frame.decode('latin-1').split()
_LOGGER.debug('Display update: %s', parts)
try:
if parts[0] == 'Pool' and parts[1] == 'Temp':
# Pool Temp <temp>°[C|F]
value = int(parts[2][:-2])
if self._pool_temp != value:
self._pool_temp = value
self._is_metric = parts[2][-1:] == 'C'
data_changed_callback(self)
elif parts[0] == 'Spa' and parts[1] == 'Temp':
# Spa Temp <temp>°[C|F]
value = int(parts[2][:-2])
if self._spa_temp != value:
self._spa_temp = value
self._is_metric = parts[2][-1:] == 'C'
data_changed_callback(self)
elif parts[0] == 'Air' and parts[1] == 'Temp':
# Air Temp <temp>°[C|F]
value = int(parts[2][:-2])
if self._air_temp != value:
self._air_temp = value
self._is_metric = parts[2][-1:] == 'C'
data_changed_callback(self)
elif parts[0] == 'Pool' and parts[1] == 'Chlorinator':
# Pool Chlorinator <value>%
value = int(parts[2][:-1])
if self._pool_chlorinator != value:
self._pool_chlorinator = value
data_changed_callback(self)
elif parts[0] == 'Spa' and parts[1] == 'Chlorinator':
# Spa Chlorinator <value>%
value = int(parts[2][:-1])
if self._spa_chlorinator != value:
self._spa_chlorinator = value
data_changed_callback(self)
elif parts[0] == 'Salt' and parts[1] == 'Level':
# Salt Level <value> [g/L|PPM]
value = float(parts[2])
if self._salt_level != value:
self._salt_level = value
self._is_metric = parts[3] == 'g/L'
data_changed_callback(self)
elif parts[0] == 'Check' and parts[1] == 'System':
# Check System <msg>
value = ' '.join(parts[2:])
if self._check_system_msg != value:
self._check_system_msg = value
data_changed_callback(self)
except ValueError:
pass
else:
_LOGGER.info('Unknown frame: %s %s',
binascii.hexlify(frame_type),
binascii.hexlify(frame))
|
Process data; returns when the reader signals EOF.
Callback is notified when any data changes.
|
entailment
|
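Reading the parser in reverse suggests the wire format: a DLE+STX header, a two-byte frame type plus payload, a 16-bit big-endian additive checksum seeded with the header bytes, and a DLE+ETX trailer, with any literal DLE in the body escaped by a following 0x00. A hedged encoder sketch built on that reading; the constant values below are assumptions, not taken from the class above:

FRAME_DLE, FRAME_STX, FRAME_ETX = 0x10, 0x02, 0x03  # assumed values

def encode_frame(frame_type: bytes, payload: bytes) -> bytes:
    body = frame_type + payload
    crc = FRAME_DLE + FRAME_STX + sum(body)   # additive checksum over header + body
    body += crc.to_bytes(2, byteorder='big')  # frames are short; no 16-bit overflow
    escaped = bytearray()
    for b in body:
        escaped.append(b)
        if b == FRAME_DLE:
            escaped.append(0)                 # escape literal DLE bytes
    return (bytes([FRAME_DLE, FRAME_STX]) + bytes(escaped)
            + bytes([FRAME_DLE, FRAME_ETX]))

print(encode_frame(b'\x00\x03', b'\x62').hex())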
def send_key(self, key):
"""Sends a key."""
_LOGGER.info('Queueing key %s', key)
frame = self._get_key_event_frame(key)
# Queue it to send immediately following the reception
# of a keep-alive packet in an attempt to avoid bus collisions.
self._send_queue.put({'frame': frame})
|
Sends a key.
|
entailment
|
def states(self):
"""Returns a set containing the enabled states."""
state_list = []
for state in States:
if state.value & self._states != 0:
state_list.append(state)
if (self._flashing_states & States.FILTER) != 0:
state_list.append(States.FILTER_LOW_SPEED)
return state_list
|
Returns a list containing the enabled states.
|
entailment
|
def get_state(self, state):
"""Returns True if the specified state is enabled."""
# Check to see if we have a change request pending; if we do
# return the value we expect it to change to.
for data in list(self._send_queue.queue):
desired_states = data['desired_states']
for desired_state in desired_states:
if desired_state['state'] == state:
return desired_state['enabled']
if state == States.FILTER_LOW_SPEED:
return (States.FILTER.value & self._flashing_states) != 0
return (state.value & self._states) != 0
|
Returns True if the specified state is enabled.
|
entailment
|
def set_state(self, state, enable):
"""Set the state."""
is_enabled = self.get_state(state)
if is_enabled == enable:
return True
key = None
desired_states = [{'state': state, 'enabled': not is_enabled}]
if state == States.FILTER_LOW_SPEED:
if not self._multi_speed_pump:
return False
# Send the FILTER key once.
# If the pump is in high speed, it will switch to low speed.
# If the pump is off the retry mechanism will send an additional
# FILTER key to switch into low speed.
# If the pump is in low speed then we pretend the pump is off;
# the retry mechanism will send an additional FILTER key
# to switch into high speed.
key = Keys.FILTER
desired_states.append({'state': States.FILTER, 'enabled': True})
else:
# See if this state has a corresponding Key
try:
key = Keys[state.name]
except KeyError:
# TODO: send the appropriate combination of keys
# to enable the state
return False
frame = self._get_key_event_frame(key)
# Queue it to send immediately following the reception
# of a keep-alive packet in an attempt to avoid bus collisions.
self._send_queue.put({'frame': frame, 'desired_states': desired_states,
'retries': 10})
return True
|
Set the state.
|
entailment
|
def trace(function, *args, **k) :
"""Decorates a function by tracing the begining and
end of the function execution, if doTrace global is True"""
if doTrace : print ("> "+function.__name__, args, k)
result = function(*args, **k)
if doTrace : print ("< "+function.__name__, args, k, "->", result)
return result
|
Decorates a function by tracing the beginning and
end of the function execution, if doTrace global is True
|
entailment
|
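Note that trace is a call-wrapper rather than a true decorator: it executes the function immediately. To get decorator syntax, it can be wrapped; a small sketch reusing trace from above (assumes the module's doTrace global is set to True):

import functools

def traced(function):
    @functools.wraps(function)
    def wrapper(*args, **k):
        return trace(function, *args, **k)
    return wrapper

@traced
def add(a, b):
    return a + b

add(2, 3)  # prints '> add (2, 3) {}' then '< add (2, 3) {} -> 5'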
def geocode(self, location) :
url = QtCore.QUrl("http://maps.googleapis.com/maps/api/geocode/xml")
url.addQueryItem("address", location)
url.addQueryItem("sensor", "false")
"""
url = QtCore.QUrl("http://maps.google.com/maps/geo/")
url.addQueryItem("q", location)
url.addQueryItem("output", "csv")
url.addQueryItem("sensor", "false")
"""
request = QtNetwork.QNetworkRequest(url)
reply = self.get(request)
while reply.isRunning() :
QtGui.QApplication.processEvents()
reply.deleteLater()
self.deleteLater()
return self._parseResult(reply)
|
url = QtCore.QUrl("http://maps.google.com/maps/geo/")
url.addQueryItem("q", location)
url.addQueryItem("output", "csv")
url.addQueryItem("sensor", "false")
|
entailment
|
def correlate(params, corrmat):
"""
Force a correlation matrix on a set of statistically distributed objects.
This function works on objects in-place.
Parameters
----------
params : array
An array of uv objects.
corrmat : 2d-array
The correlation matrix to be imposed
"""
# Make sure all inputs are compatible
assert all(
[isinstance(param, UncertainFunction) for param in params]
), 'All inputs to "correlate" must be of type "UncertainFunction"'
# Put each ufunc's samples in a column-wise matrix
data = np.vstack([param._mcpts for param in params]).T
# Apply the correlation matrix to the sampled data
new_data = induce_correlations(data, corrmat)
# Re-set the samples to the respective variables
for i in range(len(params)):
params[i]._mcpts = new_data[:, i]
|
Force a correlation matrix on a set of statistically distributed objects.
This function works on objects in-place.
Parameters
----------
params : array
An array of uv objects.
corrmat : 2d-array
The correlation matrix to be imposed
|
entailment
|
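A hedged usage sketch, assuming the mcerp-style package layout this function lives in (Normal builds a random variate; the 0.8 target is arbitrary):

import numpy as np
from mcerp import Normal, correlate  # assumed import path

x = Normal(0, 1)
y = Normal(0, 1)
target = np.array([[1.0, 0.8],
                   [0.8, 1.0]])
correlate([x, y], target)  # modifies x and y in place
print(np.corrcoef(x._mcpts, y._mcpts)[0, 1])  # roughly 0.8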
def induce_correlations(data, corrmat):
"""
Induce a set of correlations on a column-wise dataset
Parameters
----------
data : 2d-array
An m-by-n array where m is the number of samples and n is the
number of independent variables, each column of the array corresponding
to each variable
corrmat : 2d-array
An n-by-n array that defines the desired correlation coefficients
(between -1 and 1). Note: the matrix must be symmetric and
positive-definite in order to induce.
Returns
-------
new_data : 2d-array
An m-by-n array that has the desired correlations.
"""
# Create a rank matrix
data_rank = np.vstack([rankdata(datai) for datai in data.T]).T
# Generate van der Waerden scores
data_rank_score = data_rank / (data_rank.shape[0] + 1.0)
data_rank_score = norm(0, 1).ppf(data_rank_score)
# Calculate the lower triangular matrix of the Cholesky decomposition
# of the desired correlation matrix
p = chol(corrmat)
# Calculate the current correlations
t = np.corrcoef(data_rank_score, rowvar=0)
# Calculate the lower triangular matrix of the Cholesky decomposition
# of the current correlation matrix
q = chol(t)
# Calculate the re-correlation matrix
s = np.dot(p, np.linalg.inv(q))
# Calculate the re-sampled matrix
new_data = np.dot(data_rank_score, s.T)
# Create the new rank matrix
new_data_rank = np.vstack([rankdata(datai) for datai in new_data.T]).T
# Sort the original data according to new_data_rank
for i in range(data.shape[1]):
vals, order = np.unique(
np.hstack((data_rank[:, i], new_data_rank[:, i])), return_inverse=True
)
old_order = order[: new_data_rank.shape[0]]
new_order = order[-new_data_rank.shape[0] :]
tmp = data[np.argsort(old_order), i][new_order]
data[:, i] = tmp[:]
return data
|
Induce a set of correlations on a column-wise dataset
Parameters
----------
data : 2d-array
An m-by-n array where m is the number of samples and n is the
number of independent variables, each column of the array corresponding
to each variable
corrmat : 2d-array
An n-by-n array that defines the desired correlation coefficients
(between -1 and 1). Note: the matrix must be symmetric and
positive-definite in order to induce.
Returns
-------
new_data : 2d-array
An m-by-n array that has the desired correlations.
|
entailment
|
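A small self-contained check of the reordering: start from independent uniform columns and verify the induced correlation lands near the target (induce_correlations and its helper chol are the functions defined here):

import numpy as np

np.random.seed(1)
data = np.random.rand(2000, 2)  # two independent columns
target = np.array([[1.0, 0.6],
                   [0.6, 1.0]])
new_data = induce_correlations(data, target)
print(np.corrcoef(new_data, rowvar=False))  # off-diagonals close to 0.6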
def plotcorr(X, plotargs=None, full=True, labels=None):
"""
Plots a scatterplot matrix of subplots.
Usage:
plotcorr(X)
plotcorr(..., plotargs=...) # e.g., 'r*', 'bo', etc.
plotcorr(..., full=...) # e.g., True or False
plotcorr(..., labels=...) # e.g., ['label1', 'label2', ...]
Each column of "X" is plotted against other columns, resulting in
a ncols by ncols grid of subplots with the diagonal subplots labeled
with "labels". "X" is an array of arrays (i.e., a 2d matrix), a 1d array
of MCERP.UncertainFunction/Variable objects, or a mixture of the two.
Additional keyword arguments are passed on to matplotlib's "plot" command.
Returns the matplotlib figure object containing the subplot grid.
"""
import matplotlib.pyplot as plt
X = [Xi._mcpts if isinstance(Xi, UncertainFunction) else Xi for Xi in X]
X = np.atleast_2d(X)
numvars, numdata = X.shape
fig, axes = plt.subplots(nrows=numvars, ncols=numvars, figsize=(8, 8))
fig.subplots_adjust(hspace=0.0, wspace=0.0)
for ax in axes.flat:
# Hide all ticks and labels
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# Set up ticks only on one side for the "edge" subplots...
if full:
if ax.is_first_col():
ax.yaxis.set_ticks_position("left")
if ax.is_last_col():
ax.yaxis.set_ticks_position("right")
if ax.is_first_row():
ax.xaxis.set_ticks_position("top")
if ax.is_last_row():
ax.xaxis.set_ticks_position("bottom")
else:
if ax.is_first_row():
ax.xaxis.set_ticks_position("top")
if ax.is_last_col():
ax.yaxis.set_ticks_position("right")
# Label the diagonal subplots...
if not labels:
labels = ["x" + str(i) for i in range(numvars)]
for i, label in enumerate(labels):
axes[i, i].annotate(
label, (0.5, 0.5), xycoords="axes fraction", ha="center", va="center"
)
# Plot the data
for i, j in zip(*np.triu_indices_from(axes, k=1)):
if full:
idx = [(i, j), (j, i)]
else:
idx = [(i, j)]
for x, y in idx:
# FIX #1: this needed to be changed from ...(data[x], data[y],...)
if plotargs is None:
if len(X[x]) > 100:
plotargs = ",b" # pixel marker
else:
plotargs = ".b" # point marker
axes[x, y].plot(X[y], X[x], plotargs)
ylim = min(X[y]), max(X[y])
xlim = min(X[x]), max(X[x])
axes[x, y].set_ylim(
xlim[0] - (xlim[1] - xlim[0]) * 0.1, xlim[1] + (xlim[1] - xlim[0]) * 0.1
)
axes[x, y].set_xlim(
ylim[0] - (ylim[1] - ylim[0]) * 0.1, ylim[1] + (ylim[1] - ylim[0]) * 0.1
)
# Turn on the proper x or y axes ticks.
if full:
for i, j in zip(list(range(numvars)), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
axes[i, j].yaxis.set_visible(True)
else:
for i in range(numvars - 1):
axes[0, i + 1].xaxis.set_visible(True)
axes[i, -1].yaxis.set_visible(True)
for i in range(1, numvars):
for j in range(0, i):
fig.delaxes(axes[i, j])
# FIX #2: if numvars is odd, the bottom right corner plot doesn't have the
# correct axes limits, so we pull them from other axes
if numvars % 2:
xlimits = axes[0, -1].get_xlim()
ylimits = axes[-1, 0].get_ylim()
axes[-1, -1].set_xlim(xlimits)
axes[-1, -1].set_ylim(ylimits)
return fig
|
Plots a scatterplot matrix of subplots.
Usage:
plotcorr(X)
plotcorr(..., plotargs=...) # e.g., 'r*', 'bo', etc.
plotcorr(..., full=...) # e.g., True or False
plotcorr(..., labels=...) # e.g., ['label1', 'label2', ...]
Each column of "X" is plotted against other columns, resulting in
a ncols by ncols grid of subplots with the diagonal subplots labeled
with "labels". "X" is an array of arrays (i.e., a 2d matrix), a 1d array
of MCERP.UncertainFunction/Variable objects, or a mixture of the two.
Additional keyword arguments are passed on to matplotlib's "plot" command.
Returns the matplotlib figure object containing the subplot grid.
|
entailment
|
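A brief usage sketch; per the docstring, each element of X holds one variable's samples:

import numpy as np

np.random.seed(2)
a = np.random.randn(300)
b = 0.7 * a + 0.3 * np.random.randn(300)
fig = plotcorr([a, b], labels=['a', 'b'])
fig.savefig('plotcorr_demo.png')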
def chol(A):
"""
Calculate the lower triangular matrix of the Cholesky decomposition of
a symmetric, positive-definite matrix.
"""
A = np.array(A)
assert A.shape[0] == A.shape[1], "Input matrix must be square"
L = [[0.0] * len(A) for _ in range(len(A))]
for i in range(len(A)):
for j in range(i + 1):
s = sum(L[i][k] * L[j][k] for k in range(j))
L[i][j] = (
(A[i][i] - s) ** 0.5 if (i == j) else (1.0 / L[j][j] * (A[i][j] - s))
)
return np.array(L)
|
Calculate the lower triangular matrix of the Cholesky decomposition of
a symmetric, positive-definite matrix.
|
entailment
|
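A quick check that the factor reproduces the input, i.e. that L @ L.T recovers A:

import numpy as np

A = np.array([[4.0, 2.0],
              [2.0, 3.0]])
L = chol(A)
print(np.allclose(L @ L.T, A))  # True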
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
|
A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
|
entailment
|
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
|
A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
|
entailment
|
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
|
Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
|
entailment
|
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
|
Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
|
entailment
|
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
|
Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
|
entailment
|
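A hedged usage sketch; the constructor call is an assumption about how the surrounding Investigate client is built, and the key is a placeholder:

inv = Investigate('YOUR-API-KEY')  # hypothetical constructor
print(inv.categorization('example.com', labels=True))      # single domain -> GET
print(inv.categorization(['example.com', 'example.org']))  # list -> POST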
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
|
Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
|
entailment
|
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
|
Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
|
entailment
|
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
|
Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
|
entailment
|
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
|
Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
|
entailment
|
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
|
Gets whois information for a domain
|
entailment
|
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
|
Gets whois history for a domain
|
entailment
|
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
|
Gets the domains that have been registered with a nameserver or
nameservers
|
entailment
|
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
|
Searches for domains that match a given pattern
|
entailment
|
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
|
Return an object representing the samples identified by the input domain, IP, or URL
|
entailment
|
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
|
Return an object representing the sample identified by the input hash, or an empty object if that sample is not found
|
entailment
|
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
|
Gets the AS information for a given IP address.
|
entailment
|
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
|
Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.
|
entailment
|
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
|
entailment
|
def abs(x):
"""
Absolute value
"""
if isinstance(x, UncertainFunction):
mcpts = np.abs(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.abs(x)
|
Absolute value
|
entailment
|
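abs and the elementwise wrappers that follow all repeat one dispatch: apply a NumPy ufunc to the Monte Carlo samples when given an UncertainFunction, otherwise fall through to plain NumPy. A sketch of how that boilerplate could be factored; this is a hypothetical refactor, not the module's actual layout:

import numpy as np

def _wrap_ufunc(np_func):
    def wrapped(x):
        if isinstance(x, UncertainFunction):
            return UncertainFunction(np_func(x._mcpts))
        return np_func(x)
    return wrapped

acos = _wrap_ufunc(np.arccos)  # equivalent to the hand-written version below
sqrt = _wrap_ufunc(np.sqrt)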
def acos(x):
"""
Inverse cosine
"""
if isinstance(x, UncertainFunction):
mcpts = np.arccos(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arccos(x)
|
Inverse cosine
|
entailment
|
def acosh(x):
"""
Inverse hyperbolic cosine
"""
if isinstance(x, UncertainFunction):
mcpts = np.arccosh(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arccosh(x)
|
Inverse hyperbolic cosine
|
entailment
|
def asin(x):
"""
Inverse sine
"""
if isinstance(x, UncertainFunction):
mcpts = np.arcsin(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arcsin(x)
|
Inverse sine
|
entailment
|
def asinh(x):
"""
Inverse hyperbolic sine
"""
if isinstance(x, UncertainFunction):
mcpts = np.arcsinh(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arcsinh(x)
|
Inverse hyperbolic sine
|
entailment
|
def atan(x):
"""
Inverse tangent
"""
if isinstance(x, UncertainFunction):
mcpts = np.arctan(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arctan(x)
|
Inverse tangent
|
entailment
|
def atanh(x):
"""
Inverse hyperbolic tangent
"""
if isinstance(x, UncertainFunction):
mcpts = np.arctanh(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.arctanh(x)
|
Inverse hyperbolic tangent
|
entailment
|
def ceil(x):
"""
Ceiling function (round towards positive infinity)
"""
if isinstance(x, UncertainFunction):
mcpts = np.ceil(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.ceil(x)
|
Ceiling function (round towards positive infinity)
|
entailment
|
def cos(x):
"""
Cosine
"""
if isinstance(x, UncertainFunction):
mcpts = np.cos(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.cos(x)
|
Cosine
|
entailment
|
def cosh(x):
"""
Hyperbolic cosine
"""
if isinstance(x, UncertainFunction):
mcpts = np.cosh(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.cosh(x)
|
Hyperbolic cosine
|
entailment
|
def degrees(x):
"""
Convert radians to degrees
"""
if isinstance(x, UncertainFunction):
mcpts = np.degrees(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.degrees(x)
|
Convert radians to degrees
|
entailment
|
def exp(x):
"""
Exponential function
"""
if isinstance(x, UncertainFunction):
mcpts = np.exp(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.exp(x)
|
Exponential function
|
entailment
|
def expm1(x):
"""
Calculate exp(x) - 1
"""
if isinstance(x, UncertainFunction):
mcpts = np.expm1(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.expm1(x)
|
Calculate exp(x) - 1
|
entailment
|
def fabs(x):
"""
Absolute value function
"""
if isinstance(x, UncertainFunction):
mcpts = np.fabs(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.fabs(x)
|
Absolute value function
|
entailment
|
def floor(x):
"""
Floor function (round towards negative infinity)
"""
if isinstance(x, UncertainFunction):
mcpts = np.floor(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.floor(x)
|
Floor function (round towards negative infinity)
|
entailment
|
def hypot(x, y):
"""
Calculate the hypotenuse given two "legs" of a right triangle
"""
if isinstance(x, UncertainFunction) or isinstance(y, UncertainFunction):
ufx = to_uncertain_func(x)
ufy = to_uncertain_func(y)
mcpts = np.hypot(ufx._mcpts, ufy._mcpts)
return UncertainFunction(mcpts)
else:
return np.hypot(x, y)
|
Calculate the hypotenuse given two "legs" of a right triangle
|
entailment
|
def log(x):
"""
Natural logarithm
"""
if isinstance(x, UncertainFunction):
mcpts = np.log(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.log(x)
|
Natural logarithm
|
entailment
|
def log10(x):
"""
Base-10 logarithm
"""
if isinstance(x, UncertainFunction):
mcpts = np.log10(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.log10(x)
|
Base-10 logarithm
|
entailment
|
def log1p(x):
"""
Natural logarithm of (1 + x)
"""
if isinstance(x, UncertainFunction):
mcpts = np.log1p(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.log1p(x)
|
Natural logarithm of (1 + x)
|
entailment
|
def radians(x):
"""
Convert degrees to radians
"""
if isinstance(x, UncertainFunction):
mcpts = np.radians(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.radians(x)
|
Convert degrees to radians
|
entailment
|
def sin(x):
"""
Sine
"""
if isinstance(x, UncertainFunction):
mcpts = np.sin(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.sin(x)
|
Sine
|
entailment
|
def sinh(x):
"""
Hyperbolic sine
"""
if isinstance(x, UncertainFunction):
mcpts = np.sinh(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.sinh(x)
|
Hyperbolic sine
|
entailment
|
def sqrt(x):
"""
Square-root function
"""
if isinstance(x, UncertainFunction):
mcpts = np.sqrt(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.sqrt(x)
|
Square-root function
|
entailment
|
def tan(x):
"""
Tangent
"""
if isinstance(x, UncertainFunction):
mcpts = np.tan(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.tan(x)
|
Tangent
|
entailment
|
def tanh(x):
"""
Hyperbolic tangent
"""
if isinstance(x, UncertainFunction):
mcpts = np.tanh(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.tanh(x)
|
Hyperbolic tangent
|
entailment
|
def trunc(x):
"""
Truncate the values to the integer value without rounding
"""
if isinstance(x, UncertainFunction):
mcpts = np.trunc(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.trunc(x)
|
Truncate the values to the integer value without rounding
|
entailment
|
def lhd(
dist=None,
size=None,
dims=1,
form="randomized",
iterations=100,
showcorrelations=False,
):
"""
Create a Latin-Hypercube sample design based on distributions defined in the
`scipy.stats` module
Parameters
----------
dist: array_like
frozen scipy.stats.rv_continuous or rv_discrete distribution objects
that are defined previous to calling LHD
size: int
integer value for the number of samples to generate for each
distribution object
dims: int, optional
if dist is a single distribution object, and dims > 1, the one
distribution will be used to generate a size-by-dims sampled design
form: str, optional (non-functional at the moment)
determines how the sampling is to occur, with the following optional
values:
- 'randomized' - completely randomized sampling
- 'spacefilling' - space-filling sampling (generally gives a more
accurate sampling of the design when the number of sample points
is small)
- 'orthogonal' - balanced space-filling sampling (experimental)
The 'spacefilling' and 'orthogonal' forms require some iterations to
determine the optimal sampling pattern.
iterations: int, optional (non-functional at the moment)
used to control the number of allowable search iterations for generating
'spacefilling' and 'orthogonal' designs
Returns
-------
out: 2d-array,
A 2d-array where each column corresponds to each input distribution and
each row is a sample in the design
Examples
--------
Single distribution:
- uniform distribution, low = -1, width = 2
>>> import scipy.stats as ss
>>> d0 = ss.uniform(loc=-1,scale=2)
>>> print lhd(dist=d0,size=5)
[[ 0.51031081]
[-0.28961427]
[-0.68342107]
[ 0.69784371]
[ 0.12248842]]
Single distribution for multiple variables:
- normal distribution, mean = 0, stdev = 1
>>> d1 = ss.norm(loc=0,scale=1)
>>> print lhd(dist=d1,size=7,dims=5)
[[-0.8612785 0.23034412 0.21808001]
[ 0.0455778 0.07001606 0.31586419]
[-0.978553 0.30394663 0.78483995]
[-0.26415983 0.15235896 0.51462024]
[ 0.80805686 0.38891031 0.02076505]
[ 1.63028931 0.52104917 1.48016008]]
Multiple distributions:
- beta distribution, alpha = 2, beta = 5
- exponential distribution, lambda = 1.5
>>> d2 = ss.beta(2,5)
>>> d3 = ss.expon(scale=1/1.5)
>>> print lhd(dist=(d1,d2,d3),size=6)
[[-0.8612785 0.23034412 0.21808001]
[ 0.0455778 0.07001606 0.31586419]
[-0.978553 0.30394663 0.78483995]
[-0.26415983 0.15235896 0.51462024]
[ 0.80805686 0.38891031 0.02076505]
[ 1.63028931 0.52104917 1.48016008]]
"""
assert dims > 0, 'kwarg "dims" must be at least 1'
if not size or not dist:
return None
def _lhs(x, samples=20):
"""
_lhs(x) returns a latin-hypercube matrix (each row is a different
set of sample inputs) using a default sample size of 20 for each column
of X. X must be a 2xN matrix that contains the lower and upper bounds of
each column. The lower bound(s) should be in the first row and the upper
bound(s) should be in the second row.
_lhs(x,samples=N) uses the sample size of N instead of the default (20).
Example:
>>> x = np.array([[0,-1,3],[1,2,6]])
>>> print 'x:'; print x
x:
[[ 0 -1 3]
[ 1 2 6]]
>>> print 'lhs(x):'; print _lhs(x)
lhs(x):
[[ 0.02989122 -0.93918734 3.14432618]
[ 0.08869833 -0.82140706 3.19875152]
[ 0.10627442 -0.66999234 3.33814979]
[ 0.15202861 -0.44157763 3.57036894]
[ 0.2067089 -0.34845384 3.66930908]
[ 0.26542056 -0.23706445 3.76361414]
[ 0.34201421 -0.00779306 3.90818257]
[ 0.37891646 0.15458423 4.15031708]
[ 0.43501575 0.23561118 4.20320064]
[ 0.4865449 0.36350601 4.45792314]
[ 0.54804367 0.56069855 4.60911539]
[ 0.59400712 0.7468415 4.69923486]
[ 0.63708876 0.9159176 4.83611204]
[ 0.68819855 0.98596354 4.97659182]
[ 0.7368695 1.18923511 5.11135111]
[ 0.78885724 1.28369441 5.2900157 ]
[ 0.80966513 1.47415703 5.4081971 ]
[ 0.86196731 1.57844205 5.61067689]
[ 0.94784517 1.71823504 5.78021164]
[ 0.96739728 1.94169017 5.88604772]]
>>> print 'lhs(x,samples=5):'; print _lhs(x,samples=5)
lhs(x,samples=5):
[[ 0.1949127 -0.54124725 3.49238369]
[ 0.21128576 -0.13439798 3.65652016]
[ 0.47516308 0.39957406 4.5797308 ]
[ 0.64400392 0.90890999 4.92379431]
[ 0.96279472 1.79415307 5.52028238]]
"""
# determine the segment size
segmentSize = 1.0 / samples
# get the number of dimensions to sample (number of columns)
numVars = x.shape[1]
# populate each dimension
out = np.zeros((samples, numVars))
pointValue = np.zeros(samples)
for n in range(numVars):
for i in range(samples):
segmentMin = i * segmentSize
point = segmentMin + (np.random.random() * segmentSize)
pointValue[i] = (point * (x[1, n] - x[0, n])) + x[0, n]
out[:, n] = pointValue
# now randomly arrange the different segments
return _mix(out)
def _mix(data, dim="cols"):
"""
Takes a data matrix and mixes up the values along dim (either "rows" or
"cols"). In other words, if dim='rows', then each row's data is mixed
ONLY WITHIN ITSELF. Likewise, if dim='cols', then each column's data is
mixed ONLY WITHIN ITSELF.
"""
data = np.atleast_2d(data)
n = data.shape[0]
if dim == "rows":
data = data.T
data_rank = list(range(n))
for i in range(data.shape[1]):
new_data_rank = np.random.permutation(data_rank)
vals, order = np.unique(
np.hstack((data_rank, new_data_rank)), return_inverse=True
)
old_order = order[:n]
new_order = order[-n:]
tmp = data[np.argsort(old_order), i][new_order]
data[:, i] = tmp[:]
if dim == "rows":
data = data.T
return data
if form is "randomized":
if hasattr(dist, "__getitem__"): # if multiple distributions were input
nvars = len(dist)
x = np.vstack((np.zeros(nvars), np.ones(nvars)))
unif_data = _lhs(x, samples=size)
dist_data = np.empty_like(unif_data)
for i, d in enumerate(dist):
dist_data[:, i] = d.ppf(unif_data[:, i])
else: # if a single distribution was input
nvars = dims
x = np.vstack((np.zeros(nvars), np.ones(nvars)))
unif_data = _lhs(x, samples=size)
dist_data = np.empty_like(unif_data)
for i in range(nvars):
dist_data[:, i] = dist.ppf(unif_data[:, i])
elif form is "spacefilling":
def euclid_distance(arr):
n = arr.shape[0]
ans = 0.0
for i in range(n - 1):
for j in range(i + 1, n):
d = np.sqrt(
np.sum(
[(arr[i, k] - arr[j, k]) ** 2 for k in range(arr.shape[1])]
)
)
ans += 1.0 / d ** 2
return ans
def fill_space(data):
best = None
data_opt = data.copy()
for it in range(iterations):
d = euclid_distance(data)
if best is None or d < best:
best = d
data_opt = data.copy()
data = _mix(data)
print("Optimized Distance:", best)
return data_opt
if hasattr(dist, "__getitem__"): # if multiple distributions were input
nvars = len(dist)
x = np.vstack((np.zeros(nvars), np.ones(nvars)))
unif_data = fill_space(_lhs(x, samples=size))
dist_data = np.empty_like(unif_data)
for i, d in enumerate(dist):
dist_data[:, i] = d.ppf(unif_data[:, i])
else: # if a single distribution was input
nvars = dims
x = np.vstack((np.zeros(nvars), np.ones(nvars)))
unif_data = fill_space(_lhs(x, samples=size))
dist_data = np.empty_like(unif_data)
for i in range(nvars):
dist_data[:, i] = dist.ppf(unif_data[:, i])
elif form is "orthogonal":
raise NotImplementedError(
"Sorry. The orthogonal space-filling algorithm hasn't been implemented yet."
)
else:
raise ValueError('Invalid "form" value: %s' % (form))
if dist_data.shape[1] > 1:
cor_matrix = np.zeros((nvars, nvars))
for i in range(nvars):
for j in range(nvars):
x_data = dist_data[:, i].copy()
y_data = dist_data[:, j].copy()
x_mean = x_data.mean()
y_mean = y_data.mean()
num = np.sum((x_data - x_mean) * (y_data - y_mean))
den = np.sqrt(
np.sum((x_data - x_mean) ** 2) * np.sum((y_data - y_mean) ** 2)
)
cor_matrix[i, j] = num / den
cor_matrix[j, i] = num / den
inv_cor_matrix = np.linalg.pinv(cor_matrix)
VIF = np.max(np.diag(inv_cor_matrix))
if showcorrelations:
print("Correlation Matrix:\n", cor_matrix)
print("Inverted Correlation Matrix:\n", inv_cor_matrix)
print("Variance Inflation Factor (VIF):", VIF)
return dist_data
|
Create a Latin-Hypercube sample design based on distributions defined in the
`scipy.stats` module
Parameters
----------
dist: array_like
frozen scipy.stats.rv_continuous or rv_discrete distribution objects
that are defined previous to calling LHD
size: int
integer value for the number of samples to generate for each
distribution object
dims: int, optional
if dist is a single distribution object, and dims > 1, the one
distribution will be used to generate a size-by-dims sampled design
form: str, optional (non-functional at the moment)
determines how the sampling is to occur, with the following optional
values:
- 'randomized' - completely randomized sampling
- 'spacefilling' - space-filling sampling (generally gives a more
accurate sampling of the design when the number of sample points
is small)
- 'orthogonal' - balanced space-filling sampling (experimental)
The 'spacefilling' and 'orthogonal' forms require some iterations to
determine the optimal sampling pattern.
iterations: int, optional (non-functional at the moment)
used to control the number of allowable search iterations for generating
'spacefilling' and 'orthogonal' designs
Returns
-------
out: 2d-array,
A 2d-array where each column corresponds to each input distribution and
each row is a sample in the design
Examples
--------
Single distribution:
- uniform distribution, low = -1, width = 2
>>> import scipy.stats as ss
>>> d0 = ss.uniform(loc=-1,scale=2)
>>> print lhd(dist=d0,size=5)
[[ 0.51031081]
[-0.28961427]
[-0.68342107]
[ 0.69784371]
[ 0.12248842]]
Single distribution for multiple variables:
- normal distribution, mean = 0, stdev = 1
>>> d1 = ss.norm(loc=0,scale=1)
>>> print lhd(dist=d1,size=7,dims=5)
[[-0.8612785 0.23034412 0.21808001]
[ 0.0455778 0.07001606 0.31586419]
[-0.978553 0.30394663 0.78483995]
[-0.26415983 0.15235896 0.51462024]
[ 0.80805686 0.38891031 0.02076505]
[ 1.63028931 0.52104917 1.48016008]]
Multiple distributions:
- beta distribution, alpha = 2, beta = 5
- exponential distribution, lambda = 1.5
>>> d2 = ss.beta(2,5)
>>> d3 = ss.expon(scale=1/1.5)
>>> print lhd(dist=(d1,d2,d3),size=6)
[[-0.8612785 0.23034412 0.21808001]
[ 0.0455778 0.07001606 0.31586419]
[-0.978553 0.30394663 0.78483995]
[-0.26415983 0.15235896 0.51462024]
[ 0.80805686 0.38891031 0.02076505]
[ 1.63028931 0.52104917 1.48016008]]
|
entailment
|
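A minimal run of lhd with a frozen scipy distribution, mirroring the docstring's first example with Python 3 syntax:

import scipy.stats as ss

d0 = ss.uniform(loc=-1, scale=2)
design = lhd(dist=d0, size=5)
print(design.shape)  # (5, 1): one column per distribution
print(design)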
def to_uncertain_func(x):
"""
Transforms x into an UncertainFunction-compatible object,
unless it is already an UncertainFunction (in which case x is returned
unchanged).
Raises an exception unless 'x' belongs to some specific classes of
objects that are known not to depend on UncertainFunction objects
(which then cannot be considered as constants).
"""
if isinstance(x, UncertainFunction):
return x
# ! In Python 2.6+, numbers.Number could be used instead, here:
elif isinstance(x, CONSTANT_TYPES):
# No variable => no derivative to define:
return UncertainFunction([x] * npts)
raise NotUpcast("%s cannot be converted to a number with uncertainty" % type(x))
|
Transforms x into an UncertainFunction-compatible object,
unless it is already an UncertainFunction (in which case x is returned
unchanged).
Raises an exception unless 'x' belongs to some specific classes of
objects that are known not to depend on UncertainFunction objects
(which then cannot be considered as constants).
|
entailment
|
def Beta(alpha, beta, low=0, high=1, tag=None):
"""
A Beta random variate
Parameters
----------
alpha : scalar
The first shape parameter
beta : scalar
The second shape parameter
Optional
--------
low : scalar
Lower bound of the distribution support (default=0)
high : scalar
Upper bound of the distribution support (default=1)
"""
assert (
alpha > 0 and beta > 0
), 'Beta "alpha" and "beta" parameters must be greater than zero'
assert low < high, 'Beta "low" must be less than "high"'
return uv(ss.beta(alpha, beta, loc=low, scale=high - low), tag=tag)
|
A Beta random variate
Parameters
----------
alpha : scalar
The first shape parameter
beta : scalar
The second shape parameter
Optional
--------
low : scalar
Lower bound of the distribution support (default=0)
high : scalar
Upper bound of the distribution support (default=1)
|
entailment
|
def BetaPrime(alpha, beta, tag=None):
"""
A BetaPrime random variate
Parameters
----------
alpha : scalar
The first shape parameter
beta : scalar
The second shape parameter
"""
assert (
alpha > 0 and beta > 0
), 'BetaPrime "alpha" and "beta" parameters must be greater than zero'
x = Beta(alpha, beta, tag=tag)  # pass tag by keyword; the third positional slot is "low"
return x / (1 - x)
|
A BetaPrime random variate
Parameters
----------
alpha : scalar
The first shape parameter
beta : scalar
The second shape parameter
|
entailment
|
def Bradford(q, low=0, high=1, tag=None):
"""
A Bradford random variate
Parameters
----------
q : scalar
The shape parameter
low : scalar
The lower bound of the distribution (default=0)
high : scalar
The upper bound of the distribution (default=1)
"""
assert q > 0, 'Bradford "q" parameter must be greater than zero'
assert low < high, 'Bradford "low" parameter must be less than "high"'
return uv(ss.bradford(q, loc=low, scale=high - low), tag=tag)
|
A Bradford random variate
Parameters
----------
q : scalar
The shape parameter
low : scalar
The lower bound of the distribution (default=0)
high : scalar
The upper bound of the distribution (default=1)
|
entailment
|
def Burr(c, k, tag=None):
"""
A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter
"""
assert c > 0 and k > 0, 'Burr "c" and "k" parameters must be greater than zero'
return uv(ss.burr(c, k), tag=tag)
|
A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter
|
entailment
|
def ChiSquared(k, tag=None):
"""
A Chi-Squared random variate
Parameters
----------
k : int
The degrees of freedom of the distribution (must be at least one)
"""
assert int(k) == k and k >= 1, 'Chi-Squared "k" must be an integer greater than 0'
return uv(ss.chi2(k), tag=tag)
|
A Chi-Squared random variate
Parameters
----------
k : int
The degrees of freedom of the distribution (must be at least one)
|
entailment
|
def Erlang(k, lamda, tag=None):
"""
An Erlang random variate.
This distribution is the same as a Gamma(k, theta) distribution, but
with the restriction that k must be a positive integer. This
is provided for greater compatibility with other simulation tools, but
provides no advantage over the Gamma distribution in its applications.
Parameters
----------
k : int
The shape parameter (must be a positive integer)
lamda : scalar
The scale parameter (must be greater than zero)
"""
assert int(k) == k and k > 0, 'Erlang "k" must be a positive integer'
assert lamda > 0, 'Erlang "lamda" must be greater than zero'
return Gamma(k, lamda, tag)
|
An Erlang random variate.
This distribution is the same as a Gamma(k, theta) distribution, but
with the restriction that k must be a positive integer. This
is provided for greater compatibility with other simulation tools, but
provides no advantage over the Gamma distribution in its applications.
Parameters
----------
k : int
The shape parameter (must be a positive integer)
lamda : scalar
The scale parameter (must be greater than zero)
|
entailment
|
def Exponential(lamda, tag=None):
"""
An Exponential random variate
Parameters
----------
lamda : scalar
The inverse scale (as shown on Wikipedia). (FYI: mu = 1/lamda.)
"""
assert lamda > 0, 'Exponential "lamda" must be greater than zero'
return uv(ss.expon(scale=1.0 / lamda), tag=tag)
|
An Exponential random variate
Parameters
----------
lamda : scalar
The inverse scale (as shown on Wikipedia). (FYI: mu = 1/lamda.)
|
entailment
|
def ExtValueMax(mu, sigma, tag=None):
"""
An Extreme Value Maximum random variate.
Parameters
----------
mu : scalar
The location parameter
sigma : scalar
The scale parameter (must be greater than zero)
"""
assert sigma > 0, 'ExtremeValueMax "sigma" must be greater than zero'
p = U(0, 1)._mcpts[:]
return UncertainFunction(mu - sigma * np.log(-np.log(p)), tag=tag)
|
An Extreme Value Maximum random variate.
Parameters
----------
mu : scalar
The location parameter
sigma : scalar
The scale parameter (must be greater than zero)
|
entailment
|
def Fisher(d1, d2, tag=None):
"""
An F (fisher) random variate
Parameters
----------
d1 : int
Numerator degrees of freedom
d2 : int
Denominator degrees of freedom
"""
assert (
int(d1) == d1 and d1 >= 1
), 'Fisher (F) "d1" must be an integer greater than 0'
assert (
int(d2) == d2 and d2 >= 1
), 'Fisher (F) "d2" must be an integer greater than 0'
return uv(ss.f(d1, d2), tag=tag)
|
An F (fisher) random variate
Parameters
----------
d1 : int
Numerator degrees of freedom
d2 : int
Denominator degrees of freedom
|
entailment
|
def Gamma(k, theta, tag=None):
"""
A Gamma random variate
Parameters
----------
k : scalar
The shape parameter (must be positive and non-zero)
theta : scalar
The scale parameter (must be positive and non-zero)
"""
assert (
k > 0 and theta > 0
), 'Gamma "k" and "theta" parameters must be greater than zero'
return uv(ss.gamma(k, scale=theta), tag=tag)
|
A Gamma random variate
Parameters
----------
k : scalar
The shape parameter (must be positive and non-zero)
theta : scalar
The scale parameter (must be positive and non-zero)
|
entailment
|
def LogNormal(mu, sigma, tag=None):
"""
A Log-Normal random variate
Parameters
----------
mu : scalar
The location parameter
sigma : scalar
The scale parameter (must be positive and non-zero)
"""
assert sigma > 0, 'Log-Normal "sigma" must be positive'
return uv(ss.lognorm(sigma, loc=mu), tag=tag)
|
A Log-Normal random variate
Parameters
----------
mu : scalar
The location parameter
sigma : scalar
The scale parameter (must be positive and non-zero)
|
entailment
|
def Normal(mu, sigma, tag=None):
"""
A Normal (or Gaussian) random variate
Parameters
----------
mu : scalar
The mean value of the distribution
sigma : scalar
The standard deviation (must be positive and non-zero)
"""
assert sigma > 0, 'Normal "sigma" must be greater than zero'
return uv(ss.norm(loc=mu, scale=sigma), tag=tag)
|
A Normal (or Gaussian) random variate
Parameters
----------
mu : scalar
The mean value of the distribution
sigma : scalar
The standard deviation (must be positive and non-zero)
|
entailment
|
def Pareto(q, a, tag=None):
"""
A Pareto random variate (first kind)
Parameters
----------
q : scalar
The shape parameter
a : scalar
The scale parameter (the minimum possible value)
"""
assert q > 0 and a > 0, 'Pareto "q" and "a" must be positive scalars'
p = Uniform(0, 1, tag)
return a * (1 - p) ** (-1.0 / q)
|
A Pareto random variate (first kind)
Parameters
----------
q : scalar
The shape parameter
a : scalar
The scale parameter (the minimum possible value)
|
entailment
|
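The return line above is inverse-transform sampling: for U ~ Uniform(0, 1), a * (1 - U) ** (-1 / q) follows a Pareto with minimum a and tail index q. A plain-NumPy sanity check of that transform (sample size and parameters are arbitrary):

import numpy as np

np.random.seed(3)
q, a = 3.0, 2.0
u = np.random.rand(200000)
samples = a * (1 - u) ** (-1.0 / q)
print(samples.min() >= a)  # support starts at the minimum a
print(np.isclose(samples.mean(), q * a / (q - 1), rtol=0.05))  # mean = q*a/(q-1)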
def Pareto2(q, b, tag=None):
"""
A Pareto random variate (second kind). This form always starts at the
origin.
Parameters
----------
q : scalar
The shape parameter
b : scalar
The scale parameter
"""
assert q > 0 and b > 0, 'Pareto2 "q" and "b" must be positive scalars'
return Pareto(q, b, tag) - b
|
A Pareto random variate (second kind). This form always starts at the
origin.
Parameters
----------
q : scalar
The shape parameter
b : scalar
The scale parameter
|
entailment
|
def PERT(low, peak, high, g=4.0, tag=None):
"""
A PERT random variate
Parameters
----------
low : scalar
Lower bound of the distribution support
peak : scalar
The location of the distribution's peak (low <= peak <= high)
high : scalar
Upper bound of the distribution support
Optional
--------
g : scalar
Controls the uncertainty of the distribution around the peak. Smaller
values make the distribution flatter and more uncertain around the
peak while larger values make it focused and less uncertain around
the peak. (Default: 4)
"""
a, b, c = [float(x) for x in [low, peak, high]]
assert a <= b <= c, 'PERT "peak" must be greater than "low" and less than "high"'
assert g >= 0, 'PERT "g" must be non-negative'
mu = (a + g * b + c) / (g + 2)
if mu == b:
a1 = a2 = 3.0
else:
a1 = ((mu - a) * (2 * b - a - c)) / ((b - mu) * (c - a))
a2 = a1 * (c - mu) / (mu - a)
return Beta(a1, a2, a, c, tag)
|
A PERT random variate
Parameters
----------
low : scalar
Lower bound of the distribution support
peak : scalar
The location of the distribution's peak (low <= peak <= high)
high : scalar
Upper bound of the distribution support
Optional
--------
g : scalar
Controls the uncertainty of the distribution around the peak. Smaller
values make the distribution flatter and more uncertain around the
peak while larger values make it focused and less uncertain around
the peak. (Default: 4)
|
entailment
|
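A quick numeric check of the shape-parameter algebra: with low=1, peak=2, high=5 and the default g=4, the formulas reduce to the classic Beta(2, 4) on [1, 5]:

a, b, c, g = 1.0, 2.0, 5.0, 4.0
mu = (a + g * b + c) / (g + 2)  # 7/3
a1 = ((mu - a) * (2 * b - a - c)) / ((b - mu) * (c - a))
a2 = a1 * (c - mu) / (mu - a)
print(mu, a1, a2)  # 2.333..., 2.0, 4.0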
def StudentT(v, tag=None):
"""
A Student-T random variate
Parameters
----------
v : int
The degrees of freedom of the distribution (must be at least one)
"""
assert int(v) == v and v >= 1, 'Student-T "v" must be an integer greater than 0'
return uv(ss.t(v), tag=tag)
|
A Student-T random variate
Parameters
----------
v : int
The degrees of freedom of the distribution (must be at least one)
|
entailment
|
def Triangular(low, peak, high, tag=None):
"""
A triangular random variate
Parameters
----------
low : scalar
Lower bound of the distribution support
peak : scalar
The location of the triangle's peak (low <= peak <= high)
high : scalar
Upper bound of the distribution support
"""
assert low <= peak <= high, 'Triangular "peak" must lie between "low" and "high"'
low, peak, high = [float(x) for x in [low, peak, high]]
return uv(
ss.triang((1.0 * peak - low) / (high - low), loc=low, scale=(high - low)),
tag=tag,
)
|
A triangular random variate
Parameters
----------
low : scalar
Lower bound of the distribution support
peak : scalar
The location of the triangle's peak (low <= peak <= high)
high : scalar
Upper bound of the distribution support
|
entailment
|
def Uniform(low, high, tag=None):
"""
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
"""
assert low < high, 'Uniform "low" must be less than "high"'
return uv(ss.uniform(loc=low, scale=high - low), tag=tag)
|
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
|
entailment
|
def Weibull(lamda, k, tag=None):
"""
A Weibull random variate
Parameters
----------
lamda : scalar
The scale parameter
k : scalar
The shape parameter
"""
assert (
lamda > 0 and k > 0
), 'Weibull "lamda" and "k" parameters must be greater than zero'
# weibull_min takes the shape parameter first; lamda enters as the scale
return uv(ss.weibull_min(k, scale=lamda), tag=tag)
|
A Weibull random variate
Parameters
----------
lamda : scalar
The scale parameter
k : scalar
The shape parameter
|
entailment
|
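A quick numeric sanity check of the parameterization used in the fix above, relying on the known Weibull mean lamda * Gamma(1 + 1/k):

    import scipy.stats as ss
    from math import gamma

    lamda, k = 2.0, 1.5
    rv = ss.weibull_min(k, scale=lamda)
    print(rv.mean(), lamda * gamma(1 + 1 / k))   # both ~1.805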
def Bernoulli(p, tag=None):
"""
A Bernoulli random variate
Parameters
----------
p : scalar
The probability of success
"""
assert (
0 < p < 1
), 'Bernoulli probability "p" must be between zero and one, non-inclusive'
return uv(ss.bernoulli(p), tag=tag)
|
A Bernoulli random variate
Parameters
----------
p : scalar
The probability of success
|
entailment
|
def Binomial(n, p, tag=None):
"""
A Binomial random variate
Parameters
----------
n : int
The number of trials
p : scalar
The probability of success
"""
assert (
int(n) == n and n > 0
), 'Binomial number of trials "n" must be an integer greater than zero'
assert (
0 < p < 1
), 'Binomial probability "p" must be between zero and one, non-inclusive'
return uv(ss.binom(n, p), tag=tag)
|
A Binomial random variate
Parameters
----------
n : int
The number of trials
p : scalar
The probability of success
|
entailment
|
def Geometric(p, tag=None):
"""
A Geometric random variate
Parameters
----------
p : scalar
The probability of success
"""
assert (
0 < p < 1
), 'Geometric probability "p" must be between zero and one, non-inclusive'
return uv(ss.geom(p), tag=tag)
|
A Geometric random variate
Parameters
----------
p : scalar
The probability of success
|
entailment
|
def Hypergeometric(N, n, K, tag=None):
"""
A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975
"""
assert (
int(N) == N and N > 0
), 'Hypergeometric total population size "N" must be an integer greater than zero.'
assert (
int(n) == n and 0 < n <= N
), 'Hypergeometric interest population size "n" must be an integer greater than zero and no more than the total population size.'
assert (
int(K) == K and 0 < K <= N
), 'Hypergeometric chosen population size "K" must be an integer greater than zero and no more than the total population size.'
return uv(ss.hypergeom(N, n, K), tag=tag)
|
A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975
|
entailment
|
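The 0.003964... figure in the example can be re-derived by hand from the hypergeometric pmf, P(X = 4) = C(5, 4) * C(45, 6) / C(50, 10):

    from math import comb

    p = comb(5, 4) * comb(45, 6) / comb(50, 10)
    print(p)   # ~0.0039645830..., matching the docstring output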
def Poisson(lamda, tag=None):
"""
A Poisson random variate
Parameters
----------
lamda : scalar
        The rate of occurrence within a specified interval of time or space.
"""
assert lamda > 0, 'Poisson "lamda" must be greater than zero.'
return uv(ss.poisson(lamda), tag=tag)
|
A Poisson random variate
Parameters
----------
lamda : scalar
    The rate of occurrence within a specified interval of time or space.
|
entailment
|
def covariance_matrix(nums_with_uncert):
"""
Calculate the covariance matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
cov_matrix : 2d-array-like
A nested list containing covariance values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> covariance_matrix([x,y,z])
[[ 9.99694861e-03 2.54000840e-05 1.00477488e-02]
[ 2.54000840e-05 9.99823207e-03 2.00218642e-02]
[ 1.00477488e-02 2.00218642e-02 5.00914772e-02]]
"""
ufuncs = list(map(to_uncertain_func, nums_with_uncert))
cov_matrix = []
for (i1, expr1) in enumerate(ufuncs):
coefs_expr1 = []
mean1 = expr1.mean
for (i2, expr2) in enumerate(ufuncs[: i1 + 1]):
mean2 = expr2.mean
coef = np.mean((expr1._mcpts - mean1) * (expr2._mcpts - mean2))
coefs_expr1.append(coef)
cov_matrix.append(coefs_expr1)
# We symmetrize the matrix:
for (i, covariance_coefs) in enumerate(cov_matrix):
covariance_coefs.extend(cov_matrix[j][i] for j in range(i + 1, len(cov_matrix)))
return cov_matrix
|
Calculate the covariance matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
cov_matrix : 2d-array-like
A nested list containing covariance values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> covariance_matrix([x,y,z])
[[ 9.99694861e-03 2.54000840e-05 1.00477488e-02]
[ 2.54000840e-05 9.99823207e-03 2.00218642e-02]
[ 1.00477488e-02 2.00218642e-02 5.00914772e-02]]
|
entailment
|
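The example's matrix entries can be sanity-checked analytically: for z = x + 2*y with (nearly) independent inputs, cov(x, z) = var(x) + 2*cov(x, y) and var(z) = var(x) + 4*var(y). A sketch:

    var_x = var_y = 0.1 ** 2       # both inputs have std 0.1
    cov_xz = var_x                  # + 2*cov(x, y), ~0 for independent inputs
    var_z = var_x + 4 * var_y
    print(cov_xz, var_z)            # 0.01 and 0.05, matching ~1.005e-02 and ~5.009e-02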
def correlation_matrix(nums_with_uncert):
"""
Calculate the correlation matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
corr_matrix : 2d-array-like
        A nested list containing correlation values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> correlation_matrix([x,y,z])
[[ 0.99969486 0.00254001 0.4489385 ]
[ 0.00254001 0.99982321 0.89458702]
[ 0.4489385 0.89458702 1. ]]
"""
ufuncs = list(map(to_uncertain_func, nums_with_uncert))
data = np.vstack([ufunc._mcpts for ufunc in ufuncs])
return np.corrcoef(data.T, rowvar=0)
|
Calculate the correlation matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
corr_matrix : 2d-array-like
    A nested list containing correlation values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> correlation_matrix([x,y,z])
[[ 0.99969486 0.00254001 0.4489385 ]
[ 0.00254001 0.99982321 0.89458702]
[ 0.4489385 0.89458702 1. ]]
|
entailment
|
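The off-diagonal values in the example also follow analytically: with z = x + 2*y and equal input variances, corr(x, z) = 1/sqrt(5) and corr(y, z) = 2/sqrt(5):

    from math import sqrt

    print(1 / sqrt(5), 2 / sqrt(5))   # ~0.4472 and ~0.8944, matching the matrix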
def var(self):
"""
Variance value as a result of an uncertainty calculation
"""
mn = self.mean
vr = np.mean((self._mcpts - mn) ** 2)
return vr
|
Variance value as a result of an uncertainty calculation
|
entailment
|
def skew(self):
r"""
Skewness coefficient value as a result of an uncertainty calculation,
defined as::
_____ m3
\/beta1 = ------
std**3
where m3 is the third central moment and std is the standard deviation
"""
mn = self.mean
sd = self.std
sk = 0.0 if abs(sd) <= 1e-8 else np.mean((self._mcpts - mn) ** 3) / sd ** 3
return sk
|
Skewness coefficient value as a result of an uncertainty calculation,
defined as::
_____ m3
\/beta1 = ------
std**3
where m3 is the third central moment and std is the standard deviation
|
entailment
|
def kurt(self):
"""
Kurtosis coefficient value as a result of an uncertainty calculation,
defined as::
m4
beta2 = ------
std**4
where m4 is the fourth central moment and std is the standard deviation
"""
mn = self.mean
sd = self.std
kt = 0.0 if abs(sd) <= 1e-8 else np.mean((self._mcpts - mn) ** 4) / sd ** 4
return kt
|
Kurtosis coefficient value as a result of an uncertainty calculation,
defined as::
m4
beta2 = ------
std**4
where m4 is the fourth central moment and std is the standard deviation
|
entailment
|
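Note that kurt above is the ordinary (non-excess) kurtosis, so a standard Normal scores 3, not 0 (consistent with the [0, 1, 0, 3] reference in describe below). Both definitions can be spot-checked numerically on an Exponential(1) sample, whose theoretical skewness is 2 and kurtosis is 9:

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.exponential(size=1_000_000)
    m, s = x.mean(), x.std()
    print(((x - m) ** 3).mean() / s ** 3)   # ~2
    print(((x - m) ** 4).mean() / s ** 4)   # ~9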
def stats(self):
"""
The first four standard moments of a distribution: mean, variance, and
standardized skewness and kurtosis coefficients.
"""
mn = self.mean
vr = self.var
sk = self.skew
kt = self.kurt
return [mn, vr, sk, kt]
|
The first four standard moments of a distribution: mean, variance, and
standardized skewness and kurtosis coefficients.
|
entailment
|
def percentile(self, val):
"""
Get the distribution value at a given percentile or set of percentiles.
This follows the NIST method for calculating percentiles.
Parameters
----------
val : scalar or array
Either a single value or an array of values between 0 and 1.
Returns
-------
out : scalar or array
The actual distribution value that appears at the requested
percentile value or values
"""
try:
# test to see if an input is given as an array
out = [self.percentile(vi) for vi in val]
except (ValueError, TypeError):
if val <= 0:
out = float(min(self._mcpts))
elif val >= 1:
out = float(max(self._mcpts))
else:
                tmp = np.sort(self._mcpts)
                n = val * (len(tmp) + 1)
                k, d = int(n), n - int(n)
                # NIST ranks are 1-based, so x_k is tmp[k - 1]; guard both
                # extremes so the interpolation never indexes past the sample.
                if k < 1:
                    out = float(tmp[0])
                elif k >= len(tmp):
                    out = float(tmp[-1])
                else:
                    out = float(tmp[k - 1] + d * (tmp[k] - tmp[k - 1]))
if isinstance(val, np.ndarray):
out = np.array(out)
return out
|
Get the distribution value at a given percentile or set of percentiles.
This follows the NIST method for calculating percentiles.
Parameters
----------
val : scalar or array
Either a single value or an array of values between 0 and 1.
Returns
-------
out : scalar or array
The actual distribution value that appears at the requested
percentile value or values
|
entailment
|
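The NIST rule can be traced by hand on a small sample: for the 25th percentile of ten sorted values, the rank is 0.25 * 11 = 2.75, so interpolate between the 2nd and 3rd order statistics:

    data = sorted([2, 4, 4, 5, 6, 7, 8, 9, 10, 12])
    n = 0.25 * (len(data) + 1)   # 2.75
    k, d = int(n), n - int(n)    # k=2, d=0.75
    print(data[k - 1] + d * (data[k] - data[k - 1]))   # 4.0 (x_2 = x_3 = 4 here)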
def describe(self, name=None):
"""
Cleanly show what the four displayed distribution moments are:
- Mean
- Variance
- Standardized Skewness Coefficient
- Standardized Kurtosis Coefficient
For a standard Normal distribution, these are [0, 1, 0, 3].
If the object has an associated tag, this is presented. If the optional
``name`` kwarg is utilized, this is presented as with the moments.
Otherwise, no unique name is presented.
Example
=======
::
>>> x = N(0, 1, 'x')
>>> x.describe() # print tag since assigned
MCERP Uncertain Value (x):
...
>>> x.describe('foobar') # 'name' kwarg takes precedence
MCERP Uncertain Value (foobar):
...
>>> y = x**2
>>> y.describe('y') # print name since assigned
MCERP Uncertain Value (y):
...
        >>> y.describe() # no name printed since no tag
MCERP Uncertain Value:
...
"""
mn, vr, sk, kt = self.stats
if name is not None:
s = "MCERP Uncertain Value (" + name + "):\n"
elif self.tag is not None:
s = "MCERP Uncertain Value (" + self.tag + "):\n"
else:
s = "MCERP Uncertain Value:\n"
s += " > Mean................... {: }\n".format(mn)
s += " > Variance............... {: }\n".format(vr)
s += " > Skewness Coefficient... {: }\n".format(sk)
s += " > Kurtosis Coefficient... {: }\n".format(kt)
print(s)
|
Cleanly show what the four displayed distribution moments are:
- Mean
- Variance
- Standardized Skewness Coefficient
- Standardized Kurtosis Coefficient
For a standard Normal distribution, these are [0, 1, 0, 3].
If the object has an associated tag, this is presented. If the optional
``name`` kwarg is utilized, this is presented as with the moments.
Otherwise, no unique name is presented.
Example
=======
::
>>> x = N(0, 1, 'x')
>>> x.describe() # print tag since assigned
MCERP Uncertain Value (x):
...
>>> x.describe('foobar') # 'name' kwarg takes precedence
MCERP Uncertain Value (foobar):
...
>>> y = x**2
>>> y.describe('y') # print name since assigned
MCERP Uncertain Value (y):
...
    >>> y.describe() # no name printed since no tag
MCERP Uncertain Value:
...
|
entailment
|
def plot(self, hist=False, show=False, **kwargs):
"""
Plot the distribution of the UncertainFunction. By default, the
distribution is shown with a kernel density estimate (kde).
Optional
--------
hist : bool
If true, a density histogram is displayed (histtype='stepfilled')
show : bool
If ``True``, the figure will be displayed after plotting the
distribution. If ``False``, an explicit call to ``plt.show()`` is
required to display the figure.
kwargs : any valid matplotlib.pyplot.plot or .hist kwarg
"""
import matplotlib.pyplot as plt
vals = self._mcpts
low = min(vals)
high = max(vals)
        p = ss.gaussian_kde(vals)
xp = np.linspace(low, high, 100)
if hist:
h = plt.hist(
vals,
bins=int(np.sqrt(len(vals)) + 0.5),
histtype="stepfilled",
                density=True,
**kwargs
)
plt.ylim(0, 1.1 * h[0].max())
else:
plt.plot(xp, p.evaluate(xp), **kwargs)
plt.xlim(low - (high - low) * 0.1, high + (high - low) * 0.1)
if show:
self.show()
|
Plot the distribution of the UncertainFunction. By default, the
distribution is shown with a kernel density estimate (kde).
Optional
--------
hist : bool
If true, a density histogram is displayed (histtype='stepfilled')
show : bool
If ``True``, the figure will be displayed after plotting the
distribution. If ``False``, an explicit call to ``plt.show()`` is
required to display the figure.
kwargs : any valid matplotlib.pyplot.plot or .hist kwarg
|
entailment
|
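Usage sketch (the `from mcerp import N` path is an assumption; `N` matches the normal-variate constructor used in the examples above):

    import matplotlib.pyplot as plt
    from mcerp import N

    x = N(0, 1)
    x.plot(hist=True, alpha=0.3)   # density histogram
    x.plot()                       # KDE curve on the same axes
    plt.show()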
def plot(self, hist=False, show=False, **kwargs):
"""
Plot the distribution of the UncertainVariable. Continuous
distributions are plotted with a line plot and discrete distributions
are plotted with discrete circles.
Optional
--------
hist : bool
If true, a histogram is displayed
show : bool
If ``True``, the figure will be displayed after plotting the
distribution. If ``False``, an explicit call to ``plt.show()`` is
required to display the figure.
kwargs : any valid matplotlib.pyplot.plot kwarg
"""
import matplotlib.pyplot as plt
if hist:
vals = self._mcpts
low = vals.min()
high = vals.max()
h = plt.hist(
vals,
bins=int(np.sqrt(len(vals)) + 0.5),
histtype="stepfilled",
                density=True,
**kwargs
)
plt.ylim(0, 1.1 * h[0].max())
else:
bound = 0.0001
low = self.rv.ppf(bound)
high = self.rv.ppf(1 - bound)
if hasattr(self.rv.dist, "pmf"):
low = int(low)
high = int(high)
vals = list(range(low, high + 1))
plt.plot(vals, self.rv.pmf(vals), "o", **kwargs)
else:
vals = np.linspace(low, high, 500)
plt.plot(vals, self.rv.pdf(vals), **kwargs)
plt.xlim(low - (high - low) * 0.1, high + (high - low) * 0.1)
if show:
self.show()
|
Plot the distribution of the UncertainVariable. Continuous
distributions are plotted with a line plot and discrete distributions
are plotted with discrete circles.
Optional
--------
hist : bool
If true, a histogram is displayed
show : bool
If ``True``, the figure will be displayed after plotting the
distribution. If ``False``, an explicit call to ``plt.show()`` is
required to display the figure.
kwargs : any valid matplotlib.pyplot.plot kwarg
|
entailment
|
def load_hat(self, path): # pylint: disable=no-self-use
"""Loads the hat from a picture at path.
Args:
path: The path to load from
Returns:
The hat data.
"""
hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if hat is None:
raise ValueError('No hat image found at `{}`'.format(path))
b, g, r, a = cv2.split(hat)
return cv2.merge((r, g, b, a))
|
Loads the hat from a picture at path.
Args:
path: The path to load from
Returns:
The hat data.
|
entailment
|
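The split/merge above exists to reorder OpenCV's native BGRA channels into RGBA for RGB-oriented consumers. A hedged one-call equivalent (the 'hat.png' file name is illustrative):

    import cv2

    bgra = cv2.imread('hat.png', cv2.IMREAD_UNCHANGED)
    if bgra is not None and bgra.shape[2] == 4:
        rgba = cv2.cvtColor(bgra, cv2.COLOR_BGRA2RGBA)   # same result in one call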
def find_faces(self, image, draw_box=False):
"""Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades.
"""
frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
faces = self.cascade.detectMultiScale(
frame_gray,
scaleFactor=1.3,
minNeighbors=5,
minSize=(50, 50),
flags=0)
if draw_box:
for x, y, w, h in faces:
cv2.rectangle(image, (x, y),
(x + w, y + h), (0, 255, 0), 2)
return faces
|
Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades.
|
entailment
|
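A standalone sketch of the same detection step (the cascade path, file name, and wiring are illustrative, not taken from the source class):

    import cv2

    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    image = cv2.cvtColor(cv2.imread('photo.jpg'), cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = cascade.detectMultiScale(
        gray, scaleFactor=1.3, minNeighbors=5, minSize=(50, 50))
    for x, y, w, h in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)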
def find_resources(self, rsrc_type, sort=None, yield_pages=False, **kwargs):
"""Find instances of `rsrc_type` that match the filter in `**kwargs`"""
return rsrc_type.find(self, sort=sort, yield_pages=yield_pages, **kwargs)
|
Find instances of `rsrc_type` that match the filter in `**kwargs`
|
entailment
|
def changed(self, message=None, *args):
"""Marks the object as changed.
If a `parent` attribute is set, the `changed()` method on the parent
will be called, propagating the change notification up the chain.
The message (if provided) will be debug logged.
"""
if message is not None:
self.logger.debug('%s: %s', self._repr(), message % args)
self.logger.debug('%s: changed', self._repr())
if self.parent is not None:
self.parent.changed()
elif isinstance(self, Mutable):
super(TrackedObject, self).changed()
|
Marks the object as changed.
If a `parent` attribute is set, the `changed()` method on the parent
will be called, propagating the change notification up the chain.
The message (if provided) will be debug logged.
|
entailment
|
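A minimal standalone illustration of the parent-propagation idea (the Node class is invented for this sketch; the real tracked classes additionally integrate with SQLAlchemy's Mutable):

    class Node:
        def __init__(self, parent=None):
            self.parent = parent

        def changed(self):
            print(id(self), 'changed')
            if self.parent is not None:
                self.parent.changed()   # bubble the notification upward

    root = Node()
    leaf = Node(parent=Node(parent=root))
    leaf.changed()   # fires three times: leaf, middle node, then root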
def register(cls, origin_type):
"""Decorator for mutation tracker registration.
The provided `origin_type` is mapped to the decorated class such that
future calls to `convert()` will convert the object of `origin_type`
to an instance of the decorated class.
"""
def decorator(tracked_type):
"""Adds the decorated class to the `_type_mapping` dictionary."""
cls._type_mapping[origin_type] = tracked_type
return tracked_type
return decorator
|
Decorator for mutation tracker registration.
The provided `origin_type` is mapped to the decorated class such that
future calls to `convert()` will convert the object of `origin_type`
to an instance of the decorated class.
|
entailment
|
def convert(cls, obj, parent):
"""Converts objects to registered tracked types
This checks the type of the given object against the registered tracked
types. When a match is found, the given object will be converted to the
tracked type, its parent set to the provided parent, and returned.
If its type does not occur in the registered types mapping, the object
is returned unchanged.
"""
replacement_type = cls._type_mapping.get(type(obj))
if replacement_type is not None:
new = replacement_type(obj)
new.parent = parent
return new
return obj
|
Converts objects to registered tracked types
This checks the type of the given object against the registered tracked
types. When a match is found, the given object will be converted to the
tracked type, its parent set to the provided parent, and returned.
If its type does not occur in the registered types mapping, the object
is returned unchanged.
|
entailment
|
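A standalone mini-version showing how register() and convert() cooperate (Tracker and TrackedDict are invented names for this sketch, not the library's classes):

    class Tracker:
        _type_mapping = {}

        @classmethod
        def register(cls, origin_type):
            def decorator(tracked_type):
                cls._type_mapping[origin_type] = tracked_type
                return tracked_type
            return decorator

        @classmethod
        def convert(cls, obj, parent):
            replacement = cls._type_mapping.get(type(obj))
            if replacement is not None:
                new = replacement(obj)
                new.parent = parent
                return new
            return obj

    @Tracker.register(dict)
    class TrackedDict(dict):
        parent = None

    d = Tracker.convert({'a': 1}, parent='owner')
    print(type(d).__name__, d.parent)   # TrackedDict owner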