sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def update_params(self, parameters):
    """Pass in a dictionary to update url parameters for NBA stats API

    Parameters
    ----------
    parameters : dict
        A dict containing key, value pairs that correspond with NBA stats
        API parameters.

    Returns
    -------
    self : TeamLog
        The TeamLog object containing the updated NBA stats API
        parameters.
    """
    # NOTE(review): 'url_paramaters' is misspelled, but it is the attribute
    # name this object uses elsewhere, so it must stay as-is here.
    self.url_paramaters.update(parameters)
    self.response = requests.get(self.base_url,
                                 params=self.url_paramaters,
                                 headers=HEADERS)
    # Fail fast on any non-200 response from the stats API.
    self.response.raise_for_status()
    return self
|
Pass in a dictionary to update url parameters for NBA stats API
Parameters
----------
parameters : dict
A dict containing key, value pairs that correspond with NBA stats
API parameters.
Returns
-------
self : TeamLog
The TeamLog object containing the updated NBA stats API
parameters.
|
entailment
|
def get_shots(self):
    """Returns the shot chart data as a pandas DataFrame."""
    # The NBA stats JSON stores rows and column headers separately under
    # the first entry of 'resultSets'; combine them into one DataFrame.
    result_set = self.response.json()['resultSets'][0]
    return pd.DataFrame(result_set['rowSet'], columns=result_set['headers'])
|
Returns the shot chart data as a pandas DataFrame.
|
entailment
|
def connect(self):
    """
    Connect will attempt to connect to the NATS server. The url can
    contain username/password semantics.
    """
    # Build the TCP socket and open the connection, then wrap the socket
    # in a file-like object (presumably for buffered line reads — TODO
    # confirm against _build_file_socket) before sending the initial
    # CONNECT protocol message. Order matters here.
    self._build_socket()
    self._connect_socket()
    self._build_file_socket()
    self._send_connect_msg()
|
Connect will attempt to connect to the NATS server. The url can
contain username/password semantics.
|
entailment
|
def subscribe(self, subject, callback, queue=''):
    """
    Subscribe will express interest in the given subject. The subject can
    have wildcards (partial:*, full:>). Messages will be delivered to the
    associated callback.
    Args:
        subject (string): a string with the subject
        callback (function): callback to be called
    """
    sid = self._next_sid
    # NOTE(review): 'connetion' is misspelled but matches the keyword
    # Subscription expects; do not rename it in this method alone.
    subscription = Subscription(
        sid=sid,
        subject=subject,
        queue=queue,
        callback=callback,
        connetion=self
    )
    self._subscriptions[sid] = subscription
    # Register interest with the server: SUB <subject> <queue> <sid>
    self._send('SUB %s %s %d' % (subject, queue, sid))
    self._next_sid = sid + 1
    return subscription
|
Subscribe will express interest in the given subject. The subject can
have wildcards (partial:*, full:>). Messages will be delivered to the
associated callback.
Args:
subject (string): a string with the subject
callback (function): callback to be called
|
entailment
|
def unsubscribe(self, subscription, max=None):
    """
    Unsubscribe will remove interest in the given subject. If max is
    provided an automatic Unsubscribe that is processed by the server
    when max messages have been received
    Args:
        subscription (pynats.Subscription): a Subscription object
        max (int=None): number of messages
    """
    if max is not None:
        # Deferred removal: the server auto-unsubscribes after `max`
        # messages, so keep the local subscription entry for now.
        subscription.max = max
        self._send('UNSUB %d %s' % (subscription.sid, max))
    else:
        # Immediate removal on both server and client side.
        self._send('UNSUB %d' % subscription.sid)
        self._subscriptions.pop(subscription.sid)
|
Unsubscribe will remove interest in the given subject. If max is
provided an automatic Unsubscribe that is processed by the server
when max messages have been received
Args:
subscription (pynats.Subscription): a Subscription object
max (int=None): number of messages
|
entailment
|
def publish(self, subject, msg, reply=None):
    """
    Publish publishes the data argument to the given subject.
    Args:
        subject (string): a string with the subject
        msg (string): payload string
        reply (string): subject used in the reply
    """
    payload = '' if msg is None else msg
    # Wire format: PUB <subject> [reply] <payload-length>, then the payload
    # itself on the following line.
    if reply is None:
        header = 'PUB %s %d' % (subject, len(payload))
    else:
        header = 'PUB %s %s %d' % (subject, reply, len(payload))
    self._send(header)
    self._send(payload)
|
Publish publishes the data argument to the given subject.
Args:
subject (string): a string with the subject
msg (string): payload string
reply (string): subject used in the reply
|
entailment
|
def request(self, subject, callback, msg=None):
    """
    Publish a message with an implicit inbox listener as the reply.
    Message is optional.
    Args:
        subject (string): a string with the subject
        callback (function): callback to be called
        msg (string=None): payload string
    """
    reply_inbox = self._build_inbox()
    subscription = self.subscribe(reply_inbox, callback)
    # Only one reply is expected: auto-unsubscribe after a single message.
    self.unsubscribe(subscription, 1)
    self.publish(subject, msg, reply_inbox)
    return subscription
|
Publish a message with an implicit inbox listener as the reply.
Message is optional.
Args:
subject (string): a string with the subject
callback (function): callback to be called
msg (string=None): payload string
|
entailment
|
def wait(self, duration=None, count=0):
    """
    Wait reads and processes incoming server messages until the given
    duration elapses or `count` messages have been handled.
    Args:
        duration (float): will wait for the given number of seconds
        count (int): stop the wait after n messages from any subject
    """
    start = time.time()
    total = 0
    while True:
        # `msg_type` instead of `type` so the builtin is not shadowed.
        msg_type, result = self._recv(MSG, PING, OK)
        if msg_type is MSG:
            total += 1
            # A message handler returning False stops the wait loop.
            if self._handle_msg(result) is False:
                break
            if count and total >= count:
                break
        elif msg_type is PING:
            self._handle_ping()
        # Duration is checked after each protocol event, so the wait may
        # overrun slightly while blocked inside _recv.
        if duration and time.time() - start > duration:
            break
|
Publish publishes the data argument to the given subject.
Args:
duration (float): will wait for the given number of seconds
count (count): stop of wait after n messages from any subject
|
entailment
|
def draw_court(ax=None, color='gray', lw=1, outer_lines=False):
    """Returns an axes with a basketball court drawn onto to it.

    The court is drawn in the coordinate system the NBA stats API uses for
    shot chart data: the hoop center sits at (0, 0) and one foot equals
    +/-10 units on each axis (e.g. (-220, 0) is 22 feet left of the hoop).

    Parameters
    ----------
    ax : Axes, optional
        The Axes object to plot the court onto.
    color : matplotlib color, optional
        The color of the court lines.
    lw : float, optional
        The linewidth of the court lines.
    outer_lines : boolean, optional
        If `True` it draws the out of bound lines in same style as the rest
        of the court.

    Returns
    -------
    ax : Axes
        The Axes object with the court on it.
    """
    if ax is None:
        ax = plt.gca()

    court_patches = [
        # Hoop: 7.5-unit (9 inch) radius ring centered on the origin.
        Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False),
        # Backboard: 6 ft wide, drawn as a zero-height rectangle (a line).
        Rectangle((-30, -12.5), 60, 0, linewidth=lw, color=color),
        # Paint: outer box, width=16ft, height=19ft.
        Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
                  fill=False),
        # Paint: inner box, width=12ft, height=19ft.
        Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
                  fill=False),
        # Free throw circle: solid top half, dashed bottom half.
        Arc((0, 142.5), 120, 120, theta1=0, theta2=180, linewidth=lw,
            color=color, fill=False),
        Arc((0, 142.5), 120, 120, theta1=180, theta2=0, linewidth=lw,
            color=color, linestyle='dashed'),
        # Restricted zone: arc with a 4 ft radius from the hoop center.
        Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
            color=color),
        # Corner three-point lines, 14 ft long before the arc, both sides.
        Rectangle((-220, -47.5), 0, 140, linewidth=lw, color=color),
        Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color),
        # Three-point arc, centered on the hoop, 23'9" away from it.
        Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
            color=color),
        # Center court: outer and inner arcs.
        Arc((0, 422.5), 120, 120, theta1=180, theta2=0, linewidth=lw,
            color=color),
        Arc((0, 422.5), 40, 40, theta1=180, theta2=0, linewidth=lw,
            color=color),
    ]

    if outer_lines:
        # Half court line, baseline and side out-of-bounds lines.
        court_patches.append(Rectangle((-250, -47.5), 500, 470,
                                       linewidth=lw, color=color,
                                       fill=False))

    for patch in court_patches:
        ax.add_patch(patch)
    return ax
|
Returns an axes with a basketball court drawn onto to it.
This function draws a court based on the x and y-axis values that the NBA
stats API provides for the shot chart data. For example the center of the
hoop is located at the (0,0) coordinate. Twenty-two feet from the left of
the center of the hoop in is represented by the (-220,0) coordinates.
So one foot equals +/-10 units on the x and y-axis.
Parameters
----------
ax : Axes, optional
The Axes object to plot the court onto.
color : matplotlib color, optional
The color of the court lines.
lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If `True` it draws the out of bound lines in same style as the rest of
the court.
Returns
-------
ax : Axes
The Axes object with the court on it.
|
entailment
|
def shot_chart(x, y, kind="scatter", title="", color="b", cmap=None,
               xlim=(-250, 250), ylim=(422.5, -47.5),
               court_color="gray", court_lw=1, outer_lines=False,
               flip_court=False, kde_shade=True, gridsize=None, ax=None,
               despine=False, **kwargs):
    """
    Returns an Axes object with player shots plotted.
    Parameters
    ----------
    x, y : strings or vector
        The x and y coordinates of the shots taken. They can be passed in as
        vectors (such as a pandas Series) or as columns from the pandas
        DataFrame passed into ``data``.
    data : DataFrame, optional
        DataFrame containing shots where ``x`` and ``y`` represent the
        shot location coordinates.
    kind : { "scatter", "kde", "hex" }, optional
        The kind of shot chart to create.
    title : str, optional
        The title for the plot.
    color : matplotlib color, optional
        Color used to plot the shots
    cmap : matplotlib Colormap object or name, optional
        Colormap for the range of data values. If one isn't provided, the
        colormap is derived from the value passed to ``color``. Used for KDE
        and Hexbin plots.
    {x, y}lim : two-tuples, optional
        The axis limits of the plot.
    court_color : matplotlib color, optional
        The color of the court lines.
    court_lw : float, optional
        The linewidth the of the court lines.
    outer_lines : boolean, optional
        If ``True`` the out of bound lines are drawn in as a matplotlib
        Rectangle.
    flip_court : boolean, optional
        If ``True`` orients the hoop towards the bottom of the plot. Default
        is ``False``, which orients the court where the hoop is towards the top
        of the plot.
    kde_shade : boolean, optional
        Default is ``True``, which shades in the KDE contours.
    gridsize : int, optional
        Number of hexagons in the x-direction. The default is calculated using
        the Freedman-Diaconis method.
    ax : Axes, optional
        The Axes object to plot the court onto.
    despine : boolean, optional
        If ``True``, removes the spines.
    kwargs : key, value pairs
        Keyword arguments for matplotlib Collection properties or seaborn plots.
    Returns
    -------
    ax : Axes
        The Axes object with the shot chart plotted on it.
    """
    if ax is None:
        ax = plt.gca()
    # Derive a colormap from the shot color when none was given (used by
    # the KDE and hexbin kinds).
    if cmap is None:
        cmap = sns.light_palette(color, as_cmap=True)
    # Flipping the court reverses both axis limits so the hoop is drawn at
    # the bottom of the plot instead of the top.
    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])
    # NOTE(review): the string "off" is the legacy matplotlib API for hiding
    # tick labels; newer matplotlib expects booleans — confirm against the
    # supported matplotlib version.
    ax.tick_params(labelbottom="off", labelleft="off")
    ax.set_title(title, fontsize=18)
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    if kind == "scatter":
        ax.scatter(x, y, c=color, **kwargs)
    elif kind == "kde":
        sns.kdeplot(x, y, shade=kde_shade, cmap=cmap, ax=ax, **kwargs)
        # kdeplot labels the axes after the data; blank them out again.
        ax.set_xlabel('')
        ax.set_ylabel('')
    elif kind == "hex":
        if gridsize is None:
            # Get the number of bins for hexbin using Freedman-Diaconis rule
            # This is idea was taken from seaborn, which got the calculation
            # from http://stats.stackexchange.com/questions/798/
            # NOTE(review): _freedman_diaconis_bins is a private seaborn
            # helper and may move between seaborn versions.
            from seaborn.distributions import _freedman_diaconis_bins
            x_bin = _freedman_diaconis_bins(x)
            y_bin = _freedman_diaconis_bins(y)
            gridsize = int(np.mean([x_bin, y_bin]))
        ax.hexbin(x, y, gridsize=gridsize, cmap=cmap, **kwargs)
    else:
        raise ValueError("kind must be 'scatter', 'kde', or 'hex'.")
    # Set the spines to match the rest of court lines, makes outer_lines
    # somewhat unnecessary
    for spine in ax.spines:
        ax.spines[spine].set_lw(court_lw)
        ax.spines[spine].set_color(court_color)
    if despine:
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(False)
    return ax
|
Returns an Axes object with player shots plotted.
Parameters
----------
x, y : strings or vector
The x and y coordinates of the shots taken. They can be passed in as
vectors (such as a pandas Series) or as columns from the pandas
DataFrame passed into ``data``.
data : DataFrame, optional
DataFrame containing shots where ``x`` and ``y`` represent the
shot location coordinates.
kind : { "scatter", "kde", "hex" }, optional
The kind of shot chart to create.
title : str, optional
The title for the plot.
color : matplotlib color, optional
Color used to plot the shots
cmap : matplotlib Colormap object or name, optional
Colormap for the range of data values. If one isn't provided, the
    colormap is derived from the value passed to ``color``. Used for KDE
and Hexbin plots.
{x, y}lim : two-tuples, optional
The axis limits of the plot.
court_color : matplotlib color, optional
The color of the court lines.
court_lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If ``True`` the out of bound lines are drawn in as a matplotlib
Rectangle.
flip_court : boolean, optional
If ``True`` orients the hoop towards the bottom of the plot. Default
is ``False``, which orients the court where the hoop is towards the top
of the plot.
kde_shade : boolean, optional
Default is ``True``, which shades in the KDE contours.
gridsize : int, optional
Number of hexagons in the x-direction. The default is calculated using
the Freedman-Diaconis method.
ax : Axes, optional
The Axes object to plot the court onto.
despine : boolean, optional
If ``True``, removes the spines.
kwargs : key, value pairs
Keyword arguments for matplotlib Collection properties or seaborn plots.
Returns
-------
ax : Axes
The Axes object with the shot chart plotted on it.
|
entailment
|
def shot_chart_jointgrid(x, y, data=None, joint_type="scatter", title="",
                         joint_color="b", cmap=None, xlim=(-250, 250),
                         ylim=(422.5, -47.5), court_color="gray", court_lw=1,
                         outer_lines=False, flip_court=False,
                         joint_kde_shade=True, gridsize=None,
                         marginals_color="b", marginals_type="both",
                         marginals_kde_shade=True, size=(12, 11), space=0,
                         despine=False, joint_kws=None, marginal_kws=None,
                         **kwargs):
    """
    Returns a JointGrid object containing the shot chart.
    This function allows for more flexibility in customizing your shot chart
    than the ``shot_chart_jointplot`` function.
    Parameters
    ----------
    x, y : strings or vector
        The x and y coordinates of the shots taken. They can be passed in as
        vectors (such as a pandas Series) or as columns from the pandas
        DataFrame passed into ``data``.
    data : DataFrame, optional
        DataFrame containing shots where ``x`` and ``y`` represent the shot
        location coordinates.
    joint_type : { "scatter", "kde", "hex" }, optional
        The type of shot chart for the joint plot.
    title : str, optional
        The title for the plot.
    joint_color : matplotlib color, optional
        Color used to plot the shots on the joint plot.
    cmap : matplotlib Colormap object or name, optional
        Colormap for the range of data values. If one isn't provided, the
        colormap is derived from the value passed to ``color``. Used for KDE
        and Hexbin joint plots.
    {x, y}lim : two-tuples, optional
        The axis limits of the plot. The defaults represent the out of bounds
        lines and half court line.
    court_color : matplotlib color, optional
        The color of the court lines.
    court_lw : float, optional
        The linewidth the of the court lines.
    outer_lines : boolean, optional
        If ``True`` the out of bound lines are drawn in as a matplotlib
        Rectangle.
    flip_court : boolean, optional
        If ``True`` orients the hoop towards the bottom of the plot. Default is
        ``False``, which orients the court where the hoop is towards the top of
        the plot.
    joint_kde_shade : boolean, optional
        Default is ``True``, which shades in the KDE contours on the joint plot.
    gridsize : int, optional
        Number of hexagons in the x-direction. The default is calculated using
        the Freedman-Diaconis method.
    marginals_color : matplotlib color, optional
        Color used to plot the shots on the marginal plots.
    marginals_type : { "both", "hist", "kde"}, optional
        The type of plot for the marginal plots.
    marginals_kde_shade : boolean, optional
        Default is ``True``, which shades in the KDE contours on the marginal
        plots.
    size : tuple, optional
        The width and height of the plot in inches.
    space : numeric, optional
        The space between the joint and marginal plots.
    despine : boolean, optional
        If ``True``, removes the spines.
    {joint, marginal}_kws : dicts
        Additional keyword arguments for joint and marginal plot components.
    kwargs : key, value pairs
        Keyword arguments for matplotlib Collection properties or seaborn plots.
    Returns
    -------
    grid : JointGrid
        The JointGrid object with the shot chart plotted on it.
    """
    # The joint_kws and marginal_kws idea was taken from seaborn
    # Create the default empty kwargs for joint and marginal plots
    if joint_kws is None:
        joint_kws = {}
    # Extra **kwargs are folded into the joint plot's keyword arguments.
    joint_kws.update(kwargs)
    if marginal_kws is None:
        marginal_kws = {}
    # If a colormap is not provided, then it is based off of the joint_color
    if cmap is None:
        cmap = sns.light_palette(joint_color, as_cmap=True)
    # Flip the court so that the hoop is by the bottom of the plot
    if flip_court:
        xlim = xlim[::-1]
        ylim = ylim[::-1]
    # Create the JointGrid to draw the shot chart plots onto
    grid = sns.JointGrid(x=x, y=y, data=data, xlim=xlim, ylim=ylim,
                         space=space)
    # Joint Plot
    # Create the main plot of the joint shot chart
    if joint_type == "scatter":
        grid = grid.plot_joint(plt.scatter, color=joint_color, **joint_kws)
    elif joint_type == "kde":
        grid = grid.plot_joint(sns.kdeplot, cmap=cmap,
                               shade=joint_kde_shade, **joint_kws)
    elif joint_type == "hex":
        if gridsize is None:
            # Get the number of bins for hexbin using Freedman-Diaconis rule
            # This is idea was taken from seaborn, which got the calculation
            # from http://stats.stackexchange.com/questions/798/
            # NOTE(review): _freedman_diaconis_bins is a private seaborn
            # helper and may move between seaborn versions.
            from seaborn.distributions import _freedman_diaconis_bins
            x_bin = _freedman_diaconis_bins(x)
            y_bin = _freedman_diaconis_bins(y)
            gridsize = int(np.mean([x_bin, y_bin]))
        grid = grid.plot_joint(plt.hexbin, gridsize=gridsize, cmap=cmap,
                               **joint_kws)
    else:
        raise ValueError("joint_type must be 'scatter', 'kde', or 'hex'.")
    # Marginal plots
    # Create the plots on the axis of the main plot of the joint shot chart.
    if marginals_type == "both":
        grid = grid.plot_marginals(sns.distplot, color=marginals_color,
                                   **marginal_kws)
    elif marginals_type == "hist":
        grid = grid.plot_marginals(sns.distplot, color=marginals_color,
                                   kde=False, **marginal_kws)
    elif marginals_type == "kde":
        grid = grid.plot_marginals(sns.kdeplot, color=marginals_color,
                                   shade=marginals_kde_shade, **marginal_kws)
    else:
        raise ValueError("marginals_type must be 'both', 'hist', or 'kde'.")
    # Set the size of the joint shot chart
    grid.fig.set_size_inches(size)
    # Extract the first axes, which is the main plot of the
    # joint shot chart, and draw the court onto it
    ax = grid.fig.get_axes()[0]
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    # Get rid of the axis labels
    grid.set_axis_labels(xlabel="", ylabel="")
    # Get rid of all tick labels
    # NOTE(review): string "off" is the legacy matplotlib API — newer
    # matplotlib expects booleans here; confirm the supported version.
    ax.tick_params(labelbottom="off", labelleft="off")
    # Set the title above the top marginal plot
    ax.set_title(title, y=1.2, fontsize=18)
    # Set the spines to match the rest of court lines, makes outer_lines
    # somewhat unnecessary
    for spine in ax.spines:
        ax.spines[spine].set_lw(court_lw)
        ax.spines[spine].set_color(court_color)
        # set the marginal spines to be the same as the rest of the spines
        # (runs once per spine key, so all four marginal spines get styled)
        grid.ax_marg_x.spines[spine].set_lw(court_lw)
        grid.ax_marg_x.spines[spine].set_color(court_color)
        grid.ax_marg_y.spines[spine].set_lw(court_lw)
        grid.ax_marg_y.spines[spine].set_color(court_color)
    if despine:
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(False)
    return grid
|
Returns a JointGrid object containing the shot chart.
This function allows for more flexibility in customizing your shot chart
than the ``shot_chart_jointplot`` function.
Parameters
----------
x, y : strings or vector
The x and y coordinates of the shots taken. They can be passed in as
vectors (such as a pandas Series) or as columns from the pandas
DataFrame passed into ``data``.
data : DataFrame, optional
DataFrame containing shots where ``x`` and ``y`` represent the shot
location coordinates.
joint_type : { "scatter", "kde", "hex" }, optional
The type of shot chart for the joint plot.
title : str, optional
The title for the plot.
joint_color : matplotlib color, optional
Color used to plot the shots on the joint plot.
cmap : matplotlib Colormap object or name, optional
Colormap for the range of data values. If one isn't provided, the
colormap is derived from the value passed to ``color``. Used for KDE
and Hexbin joint plots.
{x, y}lim : two-tuples, optional
The axis limits of the plot. The defaults represent the out of bounds
lines and half court line.
court_color : matplotlib color, optional
The color of the court lines.
court_lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If ``True`` the out of bound lines are drawn in as a matplotlib
Rectangle.
flip_court : boolean, optional
If ``True`` orients the hoop towards the bottom of the plot. Default is
``False``, which orients the court where the hoop is towards the top of
the plot.
joint_kde_shade : boolean, optional
Default is ``True``, which shades in the KDE contours on the joint plot.
gridsize : int, optional
Number of hexagons in the x-direction. The default is calculated using
the Freedman-Diaconis method.
marginals_color : matplotlib color, optional
Color used to plot the shots on the marginal plots.
marginals_type : { "both", "hist", "kde"}, optional
The type of plot for the marginal plots.
marginals_kde_shade : boolean, optional
Default is ``True``, which shades in the KDE contours on the marginal
plots.
size : tuple, optional
The width and height of the plot in inches.
space : numeric, optional
The space between the joint and marginal plots.
despine : boolean, optional
If ``True``, removes the spines.
{joint, marginal}_kws : dicts
    Additional keyword arguments for joint and marginal plot components.
kwargs : key, value pairs
Keyword arguments for matplotlib Collection properties or seaborn plots.
Returns
-------
grid : JointGrid
The JointGrid object with the shot chart plotted on it.
|
entailment
|
def shot_chart_jointplot(x, y, data=None, kind="scatter", title="", color="b",
                         cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5),
                         court_color="gray", court_lw=1, outer_lines=False,
                         flip_court=False, size=(12, 11), space=0,
                         despine=False, joint_kws=None, marginal_kws=None,
                         **kwargs):
    """
    Returns a seaborn JointGrid using sns.jointplot
    Parameters
    ----------
    x, y : strings or vector
        The x and y coordinates of the shots taken. They can be passed in as
        vectors (such as a pandas Series) or as column names from the pandas
        DataFrame passed into ``data``.
    data : DataFrame, optional
        DataFrame containing shots where ``x`` and ``y`` represent the
        shot location coordinates.
    kind : { "scatter", "kde", "hex" }, optional
        The kind of shot chart to create.
    title : str, optional
        The title for the plot.
    color : matplotlib color, optional
        Color used to plot the shots
    cmap : matplotlib Colormap object or name, optional
        Colormap for the range of data values. If one isn't provided, the
        colormap is derived from the value passed to ``color``. Used for KDE
        and Hexbin joint plots.
    {x, y}lim : two-tuples, optional
        The axis limits of the plot. The defaults represent the out of bounds
        lines and half court line.
    court_color : matplotlib color, optional
        The color of the court lines.
    court_lw : float, optional
        The linewidth the of the court lines.
    outer_lines : boolean, optional
        If ``True`` the out of bound lines are drawn in as a matplotlib
        Rectangle.
    flip_court : boolean, optional
        If ``True`` orients the hoop towards the bottom of the plot. Default
        is ``False``, which orients the court where the hoop is towards the top
        of the plot.
    size : tuple, optional
        The width and height of the plot in inches.
    space : numeric, optional
        The space between the joint and marginal plots.
    {joint, marginal}_kws : dicts
        Additional keyword arguments for joint and marginal plot components.
    kwargs : key, value pairs
        Keyword arguments for matplotlib Collection properties or seaborn
        plots (e.g. ``gridsize`` for hexbin plots, forwarded through
        ``sns.jointplot`` — TODO confirm forwarding for the installed
        seaborn version).
    Returns
    -------
    grid : JointGrid
        The JointGrid object with the shot chart plotted on it.
    """
    # If a colormap is not provided, then it is based off of the color
    if cmap is None:
        cmap = sns.light_palette(color, as_cmap=True)
    if kind not in ["scatter", "kde", "hex"]:
        raise ValueError("kind must be 'scatter', 'kde', or 'hex'.")
    # BUG FIX: `space` was previously hard-coded to 0 here, silently
    # ignoring the documented `space` parameter.
    # NOTE(review): `stat_func` was removed in newer seaborn releases —
    # confirm against the pinned seaborn version.
    grid = sns.jointplot(x=x, y=y, data=data, stat_func=None, kind=kind,
                         space=space, color=color, cmap=cmap,
                         joint_kws=joint_kws, marginal_kws=marginal_kws,
                         **kwargs)
    grid.fig.set_size_inches(size)
    # A joint plot has 3 Axes, the first one called ax_joint
    # is the one we want to draw our court onto and adjust some other settings
    ax = grid.ax_joint
    # Flipping the court reverses both axis limits so the hoop is drawn at
    # the bottom of the plot instead of the top.
    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    # Get rid of axis labels and tick marks
    ax.set_xlabel('')
    ax.set_ylabel('')
    # NOTE(review): string 'off' is the legacy matplotlib tick-label API —
    # newer matplotlib expects booleans here.
    ax.tick_params(labelbottom='off', labelleft='off')
    # Add a title
    ax.set_title(title, y=1.2, fontsize=18)
    # Set the spines to match the rest of court lines, makes outer_lines
    # somewhat unnecessary
    for spine in ax.spines:
        ax.spines[spine].set_lw(court_lw)
        ax.spines[spine].set_color(court_color)
        # set the margin joint spines to be same as the rest of the plot
        grid.ax_marg_x.spines[spine].set_lw(court_lw)
        grid.ax_marg_x.spines[spine].set_color(court_color)
        grid.ax_marg_y.spines[spine].set_lw(court_lw)
        grid.ax_marg_y.spines[spine].set_color(court_color)
    if despine:
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(False)
    return grid
|
Returns a seaborn JointGrid using sns.jointplot
Parameters
----------
x, y : strings or vector
The x and y coordinates of the shots taken. They can be passed in as
vectors (such as a pandas Series) or as column names from the pandas
DataFrame passed into ``data``.
data : DataFrame, optional
DataFrame containing shots where ``x`` and ``y`` represent the
shot location coordinates.
kind : { "scatter", "kde", "hex" }, optional
The kind of shot chart to create.
title : str, optional
The title for the plot.
color : matplotlib color, optional
Color used to plot the shots
cmap : matplotlib Colormap object or name, optional
Colormap for the range of data values. If one isn't provided, the
    colormap is derived from the value passed to ``color``. Used for KDE
and Hexbin joint plots.
{x, y}lim : two-tuples, optional
The axis limits of the plot. The defaults represent the out of bounds
lines and half court line.
court_color : matplotlib color, optional
The color of the court lines.
court_lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If ``True`` the out of bound lines are drawn in as a matplotlib
Rectangle.
flip_court : boolean, optional
If ``True`` orients the hoop towards the bottom of the plot. Default
is ``False``, which orients the court where the hoop is towards the top
of the plot.
gridsize : int, optional
Number of hexagons in the x-direction. The default is calculated using
the Freedman-Diaconis method.
size : tuple, optional
The width and height of the plot in inches.
space : numeric, optional
The space between the joint and marginal plots.
{joint, marginal}_kws : dicts
Additional kewyord arguments for joint and marginal plot components.
kwargs : key, value pairs
Keyword arguments for matplotlib Collection properties or seaborn plots.
Returns
-------
grid : JointGrid
The JointGrid object with the shot chart plotted on it.
|
entailment
|
def heatmap(x, y, z, title="", cmap=plt.cm.YlOrRd, bins=20,
            xlim=(-250, 250), ylim=(422.5, -47.5),
            facecolor='lightgray', facecolor_alpha=0.4,
            court_color="black", court_lw=0.5, outer_lines=False,
            flip_court=False, ax=None, **kwargs):
    """
    Returns an AxesImage object that contains a heatmap.

    The (x, y) locations are binned on a 2D grid and the mean of ``z``
    within each bin is drawn as the heatmap color.

    TODO: Redo some code

    Parameters
    ----------
    x, y : vectors
        Shot location coordinates in NBA stats API units.
    z : vector
        Values averaged within each bin — e.g. a 0/1 shot-made flag, so the
        bin mean is the FG% for that bin (presumed usage; confirm caller).
    title : str, optional
        The title for the plot.
    cmap : matplotlib Colormap, optional
        Colormap used for the heatmap values.
    bins : int or [int, int] or array_like, optional
        Bin specification forwarded to ``scipy.stats.binned_statistic_2d``.
    {x, y}lim : two-tuples, optional
        The axis limits of the plot.
    facecolor : matplotlib color, optional
        Background color of the plot area.
    facecolor_alpha : float, optional
        Alpha of the plot background.
    court_color : matplotlib color, optional
        The color of the court lines.
    court_lw : float, optional
        The linewidth of the court lines.
    outer_lines : boolean, optional
        If ``True`` the out of bound lines are drawn.
    flip_court : boolean, optional
        If ``True`` orients the hoop towards the bottom of the plot.
    ax : Axes, optional
        The Axes object to plot onto.
    kwargs : key, value pairs
        NOTE(review): currently accepted but never used by this function.

    Returns
    -------
    heatmap : AxesImage
        The AxesImage object containing the heatmap.
    """
    # Bin the FGA (x, y) and Calculcate the mean number of times shot was
    # made (z) within each bin
    # mean is the calculated FG percentage for each bin
    mean, xedges, yedges, binnumber = binned_statistic_2d(x=x, y=y,
                                                          values=z,
                                                          statistic='mean',
                                                          bins=bins)
    if ax is None:
        ax = plt.gca()
    # Flipping the court reverses both axis limits so the hoop is drawn at
    # the bottom of the plot.
    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])
    ax.tick_params(labelbottom="off", labelleft="off")
    ax.set_title(title, fontsize=18)
    ax.patch.set_facecolor(facecolor)
    ax.patch.set_alpha(facecolor_alpha)
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    # Transpose: binned_statistic_2d returns (x-bin, y-bin) indexed data,
    # while imshow expects rows to be y.
    heatmap = ax.imshow(mean.T, origin='lower', extent=[xedges[0], xedges[-1],
                        yedges[0], yedges[-1]], interpolation='nearest',
                        cmap=cmap)
    return heatmap
|
Returns an AxesImage object that contains a heatmap.
TODO: Redo some code and explain parameters
|
entailment
|
def bokeh_draw_court(figure, line_color='gray', line_width=1):
    """Returns a figure with the basketball court lines drawn onto it
    This function draws a court based on the x and y-axis values that the NBA
    stats API provides for the shot chart data. For example the center of the
    hoop is located at the (0,0) coordinate. Twenty-two feet from the left of
    the center of the hoop in is represented by the (-220,0) coordinates.
    So one foot equals +/-10 units on the x and y-axis.
    Parameters
    ----------
    figure : Bokeh figure object
        The Axes object to plot the court onto.
    line_color : str, optional
        The color of the court lines. Can be a a Hex value.
    line_width : float, optional
        The linewidth the of the court lines in pixels.
    Returns
    -------
    figure : Figure
        The Figure object with the court on it.
    """
    # hoop
    figure.circle(x=0, y=0, radius=7.5, fill_alpha=0,
                  line_color=line_color, line_width=line_width)
    # backboard
    # NOTE(review): x is a sequence while y is a scalar here (and in the
    # line calls below) — this relies on bokeh broadcasting the scalar;
    # confirm against the supported bokeh version.
    figure.line(x=range(-30, 31), y=-12.5, line_color=line_color)
    # The paint
    # outerbox (rect is specified by its center, so y=47.5 spans -47.5..142.5)
    figure.rect(x=0, y=47.5, width=160, height=190, fill_alpha=0,
                line_color=line_color, line_width=line_width)
    # innerbox
    # left inner box line
    figure.line(x=-60, y=np.arange(-47.5, 143.5), line_color=line_color,
                line_width=line_width)
    # right inner box line
    figure.line(x=60, y=np.arange(-47.5, 143.5), line_color=line_color,
                line_width=line_width)
    # Restricted Zone
    figure.arc(x=0, y=0, radius=40, start_angle=pi, end_angle=0,
               line_color=line_color, line_width=line_width)
    # top free throw arc
    figure.arc(x=0, y=142.5, radius=60, start_angle=pi, end_angle=0,
               line_color=line_color)
    # bottom free throw arc
    figure.arc(x=0, y=142.5, radius=60, start_angle=0, end_angle=pi,
               line_color=line_color, line_dash="dashed")
    # Three point line
    # corner three point lines
    figure.line(x=-220, y=np.arange(-47.5, 92.5), line_color=line_color,
                line_width=line_width)
    figure.line(x=220, y=np.arange(-47.5, 92.5), line_color=line_color,
                line_width=line_width)
    # three point arc
    figure.arc(x=0, y=0, radius=237.5, start_angle=3.528, end_angle=-0.3863,
               line_color=line_color, line_width=line_width)
    # add center court
    # outer center arc
    figure.arc(x=0, y=422.5, radius=60, start_angle=0, end_angle=pi,
               line_color=line_color, line_width=line_width)
    # inner center arc
    figure.arc(x=0, y=422.5, radius=20, start_angle=0, end_angle=pi,
               line_color=line_color, line_width=line_width)
    # outer lines, consisting of half court lines and out of bounds lines
    figure.rect(x=0, y=187.5, width=500, height=470, fill_alpha=0,
                line_color=line_color, line_width=line_width)
    return figure
|
Returns a figure with the basketball court lines drawn onto it
This function draws a court based on the x and y-axis values that the NBA
stats API provides for the shot chart data. For example the center of the
hoop is located at the (0,0) coordinate. Twenty-two feet from the left of
the center of the hoop in is represented by the (-220,0) coordinates.
So one foot equals +/-10 units on the x and y-axis.
Parameters
----------
figure : Bokeh figure object
The Axes object to plot the court onto.
line_color : str, optional
The color of the court lines. Can be a a Hex value.
line_width : float, optional
The linewidth the of the court lines in pixels.
Returns
-------
figure : Figure
The Figure object with the court on it.
|
entailment
|
def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4",
                     scatter_size=10, fill_alpha=0.4, line_alpha=0.4,
                     court_line_color='gray', court_line_width=1,
                     hover_tool=False, tooltips=None, **kwargs):
    # TODO: Settings for hover tooltip
    """
    Return a Bokeh figure with both FGA and basketball court lines drawn on it.

    The data is expected to expose its x and y shot locations under the
    "LOC_X" and "LOC_Y" names; pass ``x``/``y`` to override.

    Parameters
    ----------
    data : DataFrame
        The DataFrame that contains the shot chart data.
    x, y : str, optional
        The x and y coordinates of the shots taken.
    fill_color : str, optional
        The fill color of the shots. Can be a Hex value.
    scatter_size : int, optional
        The size of the dots for the scatter plot.
    fill_alpha : float, optional
        Alpha value for the shots, between 0 (transparent) and 1 (opaque).
    line_alpha : float, optional
        Alpha value for the outer lines of the plotted shots, between 0 and 1.
    court_line_color : str, optional
        The color of the court lines. Can be a Hex value.
    court_line_width : float, optional
        The linewidth of the court lines in pixels.
    hover_tool : boolean, optional
        If ``True``, creates a hover tooltip for the plot.
    tooltips : list of tuples, optional
        Provides the information for the hover tooltip.

    Returns
    -------
    fig : Figure
        The Figure object with the shot chart plotted on it.
    """
    fig = figure(width=700, height=658, x_range=[-250, 250],
                 y_range=[422.5, -47.5], min_border=0, x_axis_type=None,
                 y_axis_type=None, outline_line_color="black", **kwargs)
    shot_source = ColumnDataSource(data)
    fig.scatter(x, y, source=shot_source, size=scatter_size,
                color=fill_color, alpha=fill_alpha, line_alpha=line_alpha)
    bokeh_draw_court(fig, line_color=court_line_color,
                     line_width=court_line_width)
    if hover_tool:
        # the scatter renderer is the first one added to the figure
        fig.add_tools(HoverTool(renderers=[fig.renderers[0]],
                                tooltips=tooltips))
    return fig
|
Returns a figure with both FGA and basketball court lines drawn onto it.
This function expects data to be a ColumnDataSource with the x and y values
named "LOC_X" and "LOC_Y". Otherwise specify x and y.
Parameters
----------
data : DataFrame
The DataFrame that contains the shot chart data.
x, y : str, optional
The x and y coordinates of the shots taken.
fill_color : str, optional
The fill color of the shots. Can be a a Hex value.
scatter_size : int, optional
The size of the dots for the scatter plot.
fill_alpha : float, optional
Alpha value for the shots. Must be a floating point value between 0
(transparent) to 1 (opaque).
line_alpha : float, optional
Alpha value for the outer lines of the plotted shots. Must be a
floating point value between 0 (transparent) to 1 (opaque).
court_line_color : str, optional
The color of the court lines. Can be a a Hex value.
court_line_width : float, optional
The linewidth the of the court lines in pixels.
hover_tool : boolean, optional
If ``True``, creates hover tooltip for the plot.
tooltips : List of tuples, optional
Provides the information for the the hover tooltip.
Returns
-------
fig : Figure
The Figure object with the shot chart plotted on it.
|
entailment
|
def _update_centers(X, membs, n_clusters, distance):
""" Update Cluster Centers:
calculate the mean of feature vectors for each cluster.
distance can be a string or callable.
"""
centers = np.empty(shape=(n_clusters, X.shape[1]), dtype=float)
sse = np.empty(shape=n_clusters, dtype=float)
for clust_id in range(n_clusters):
memb_ids = np.where(membs == clust_id)[0]
X_clust = X[memb_ids,:]
dist = np.empty(shape=memb_ids.shape[0], dtype=float)
for i,x in enumerate(X_clust):
dist[i] = np.sum(scipy.spatial.distance.cdist(X_clust, np.array([x]), distance))
inx_min = np.argmin(dist)
centers[clust_id,:] = X_clust[inx_min,:]
sse[clust_id] = dist[inx_min]
return(centers, sse)
|
Update Cluster Centers:
select each cluster's medoid (the member minimizing total distance) as its center.
distance can be a string or callable.
|
entailment
|
def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng):
    """ Run a single trial of k-medoids clustering
        on dataset X with the given number of clusters.

        Alternates assignment (nearest center) and update (medoid
        selection) steps until the total SSE change falls below ``tol``
        or ``max_iter`` iterations are reached.

        Returns (centers, membs, sse_total, sse_arr, n_iter).
    """
    membs = np.empty(shape=X.shape[0], dtype=int)
    centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng)
    sse_last = 9999.9
    # ensure all returned values are defined even if the loop never runs
    sse_total = sse_last
    sse_arr = np.zeros(shape=n_clusters, dtype=float)
    n_iter = 0
    # range(1, max_iter) previously performed only max_iter-1 iterations
    for it in range(1, max_iter + 1):
        membs = kmeans._assign_clusters(X, centers)
        centers, sse_arr = _update_centers(X, membs, n_clusters, distance)
        sse_total = np.sum(sse_arr)
        # record the iteration count even when convergence is not reached
        n_iter = it
        if np.abs(sse_total - sse_last) < tol:
            break
        sse_last = sse_total
    return(centers, membs, sse_total, sse_arr, n_iter)
|
Run a single trial of k-medoids clustering
on dataset X, and given number of clusters
|
entailment
|
def fit(self, X):
    """ Apply K-Medoids clustering to dataset X (feature vectors).

        Stores the fitted centers, labels, per-cluster SSE and iteration
        count on the estimator.
    """
    result = _kmedoids(X, self.n_clusters, self.distance, self.max_iter,
                       self.n_trials, self.tol, self.rng)
    self.centers_, self.labels_, self.sse_arr_, self.n_iter_ = result
|
Apply KMeans Clustering
X: dataset with feature vectors
|
entailment
|
def _kernelized_dist2centers(K, n_clusters, wmemb, kernel_dist):
""" Computin the distance in transformed feature space to
cluster centers.
K is the kernel gram matrix.
wmemb contains cluster assignment. {0,1}
Assume j is the cluster id:
||phi(x_i) - Phi_center_j|| = K_ii - 2 sum w_jh K_ih +
sum_r sum_s w_jr w_js K_rs
"""
n_samples = K.shape[0]
for j in range(n_clusters):
memb_j = np.where(wmemb == j)[0]
size_j = memb_j.shape[0]
K_sub_j = K[memb_j][:, memb_j]
kernel_dist[:,j] = 1 + np.sum(K_sub_j) /(size_j*size_j)
kernel_dist[:,j] -= 2 * np.sum(K[:, memb_j], axis=1) / size_j
return
|
Computing the distance in transformed feature space to
cluster centers.
K is the kernel gram matrix.
wmemb contains cluster assignment. {0,1}
Assume j is the cluster id:
||phi(x_i) - Phi_center_j|| = K_ii - 2 sum w_jh K_ih +
sum_r sum_s w_jr w_js K_rs
|
entailment
|
def _init_mixture_params(X, n_mixtures, init_method):
"""
Initialize mixture density parameters with
equal priors
random means
identity covariance matrices
"""
init_priors = np.ones(shape=n_mixtures, dtype=float) / n_mixtures
if init_method == 'kmeans':
km = _kmeans.KMeans(n_clusters = n_mixtures, n_trials=20)
km.fit(X)
init_means = km.centers_
else:
inx_rand = np.random.choice(X.shape[0], size=n_mixtures)
init_means = X[inx_rand,:]
if np.any(np.isnan(init_means)):
raise ValueError("Init means are NaN! ")
n_features = X.shape[1]
init_covars = np.empty(shape=(n_mixtures, n_features, n_features), dtype=float)
for i in range(n_mixtures):
init_covars[i] = np.eye(n_features)
return(init_priors, init_means, init_covars)
|
Initialize mixture density parameters with
equal priors
random means
identity covariance matrices
|
entailment
|
def __log_density_single(x, mean, covar):
""" This is just a test function to calculate
the normal density at x given mean and covariance matrix.
Note: this function is not efficient, so
_log_multivariate_density is recommended for use.
"""
n_dim = mean.shape[0]
dx = x - mean
covar_inv = scipy.linalg.inv(covar)
covar_det = scipy.linalg.det(covar)
den = np.dot(np.dot(dx.T, covar_inv), dx) + n_dim*np.log(2*np.pi) + np.log(covar_det)
return(-1/2 * den)
|
This is just a test function to calculate
the normal density at x given mean and covariance matrix.
Note: this function is not efficient, so
_log_multivariate_density is recommended for use.
|
entailment
|
def _log_multivariate_density(X, means, covars):
"""
Class conditional density:
P(x | mu, Sigma) = 1/((2pi)^d/2 * |Sigma|^1/2) * exp(-1/2 * (x-mu)^T * Sigma^-1 * (x-mu))
log of class conditional density:
log P(x | mu, Sigma) = -1/2*(d*log(2pi) + log(|Sigma|) + (x-mu)^T * Sigma^-1 * (x-mu))
"""
n_samples, n_dim = X.shape
n_components = means.shape[0]
assert(means.shape[0] == covars.shape[0])
log_proba = np.empty(shape=(n_samples, n_components), dtype=float)
for i, (mu, cov) in enumerate(zip(means, covars)):
try:
cov_chol = scipy.linalg.cholesky(cov, lower=True)
except scipy.linalg.LinAlgError:
try:
cov_chol = scipy.linalg.cholesky(cov + Lambda*np.eye(n_dim), lower=True)
except:
raise ValueError("Triangular Matrix Decomposition not performed!\n")
cov_log_det = 2 * np.sum(np.log(np.diagonal(cov_chol)))
try:
cov_solve = scipy.linalg.solve_triangular(cov_chol, (X - mu).T, lower=True).T
except:
raise ValueError("Solve_triangular not perormed!\n")
log_proba[:, i] = -0.5 * (np.sum(cov_solve ** 2, axis=1) + \
n_dim * np.log(2 * np.pi) + cov_log_det)
return(log_proba)
|
Class conditional density:
P(x | mu, Sigma) = 1/((2pi)^d/2 * |Sigma|^1/2) * exp(-1/2 * (x-mu)^T * Sigma^-1 * (x-mu))
log of class conditional density:
log P(x | mu, Sigma) = -1/2*(d*log(2pi) + log(|Sigma|) + (x-mu)^T * Sigma^-1 * (x-mu))
|
entailment
|
def _log_likelihood_per_sample(X, means, covars):
    """
    Per-sample log likelihood of the mixture and posterior responsibilities.
    With Theta = (theta_1, ..., theta_M):
        L(Theta | X) = product_i P(x_i | Theta),
        log L(Theta | X) = sum_i log(P(x_i | Theta)),
        P(x_i | Theta) = sum_j prior_j * p(x_i | theta_j).
    Posterior probability of sample x coming from component i:
        P(w_i | x) = P(x | w_i) * P(w_i) / P(x).
    Uses the log-sum-exp trick (shift by the per-sample max) for stability;
    Epsilon guards against log(0).
    Returns (log_likelihood, post_proba).
    """
    logden = _log_multivariate_density(X, means, covars)
    logden_max = logden.max(axis=1)
    # log-sum-exp, shifted by the per-sample maximum for numerical stability
    shifted = np.exp(logden.T - logden_max) + Epsilon
    log_likelihood = np.log(np.sum(shifted, axis=0))
    log_likelihood += logden_max
    post_proba = np.exp(logden - log_likelihood[:, np.newaxis])
    return (log_likelihood, post_proba)
|
Theta = (theta_1, theta_2, ... theta_M)
Likelihood of mixture parameters given data: L(Theta | X) = product_i P(x_i | Theta)
log likelihood: log L(Theta | X) = sum_i log(P(x_i | Theta))
and note that p(x_i | Theta) = sum_j prior_j * p(x_i | theta_j)
Probability of sample x being generated from component i:
P(w_i | x) = P(x|w_i) * P(w_i) / P(X)
where P(X) = sum_i P(x|w_i) * P(w_i)
Here post_proba = P/(w_i | x)
and log_likelihood = log(P(x|w_i))
|
entailment
|
def _validate_params(priors, means, covars):
""" Validation Check for M.L. paramateres
"""
for i,(p,m,cv) in enumerate(zip(priors, means, covars)):
if np.any(np.isinf(p)) or np.any(np.isnan(p)):
raise ValueError("Component %d of priors is not valid " % i)
if np.any(np.isinf(m)) or np.any(np.isnan(m)):
raise ValueError("Component %d of means is not valid " % i)
if np.any(np.isinf(cv)) or np.any(np.isnan(cv)):
raise ValueError("Component %d of covars is not valid " % i)
if (not np.allclose(cv, cv.T) or np.any(scipy.linalg.eigvalsh(cv) <= 0)):
raise ValueError("Component %d of covars must be positive-definite" % i)
|
Validation Check for M.L. parameters
|
entailment
|
def _maximization_step(X, posteriors):
    """
    M-step: update mixture parameters from posterior responsibilities.
      priors: P(w_i) = sum_x P(w_i | x), normalized to [0,1]
      means:  center_w_i = sum_x P(w_i|x)*x / sum_x P(w_i|x)
      covars: responsibility-weighted scatter, ridge-regularized by Lambda.
    Epsilon guards every denominator against division by zero.
    Returns (prior_proba, means, covars) after validation.
    """
    # prior probabilities (class weights)
    resp_sum = np.sum(posteriors, axis=0)
    prior_proba = resp_sum / (resp_sum.sum() + Epsilon)
    # responsibility-weighted means
    means = np.dot(posteriors.T, X) / (resp_sum[:, np.newaxis] + Epsilon)
    # covariance matrices
    n_components = posteriors.shape[1]
    n_features = X.shape[1]
    covars = np.empty(shape=(n_components, n_features, n_features), dtype=float)
    for k in range(n_components):
        resp_k = posteriors[:, k]
        diff = X - means[k]
        with np.errstate(under='ignore'):
            scatter = np.dot(resp_k * diff.T, diff) / (resp_k.sum() + Epsilon)
        # ridge term keeps the covariance positive-definite
        covars[k] = scatter + Lambda * np.eye(n_features)
    _validate_params(prior_proba, means, covars)
    return(prior_proba, means, covars)
|
Update class parameters as below:
priors: P(w_i) = sum_x P(w_i | x) ==> Then normalize to get in [0,1]
Class means: center_w_i = sum_x P(w_i|x)*x / sum_i sum_x P(w_i|x)
|
entailment
|
def fit(self, X):
    """ Fit the mixture-density parameters to X with the EM algorithm,
        then store priors/means/covars and the predicted labels.
    """
    params = _fit_gmm_params(X=X, n_mixtures=self.n_clusters,
                             n_init=self.n_trials,
                             init_method=self.init_method,
                             n_iter=self.max_iter, tol=self.tol)
    self.priors_ = params['priors']
    self.means_ = params['means']
    self.covars_ = params['covars']
    self.converged = True
    self.labels_ = self.predict(X)
|
Fit mixture-density parameters with EM algorithm
|
entailment
|
def _kmeans_init(X, n_clusters, method='balanced', rng=None):
""" Initialize k=n_clusters centroids randomly
"""
n_samples = X.shape[0]
if rng is None:
cent_idx = np.random.choice(n_samples, replace=False, size=n_clusters)
else:
#print('Generate random centers using RNG')
cent_idx = rng.choice(n_samples, replace=False, size=n_clusters)
centers = X[cent_idx,:]
mean_X = np.mean(X, axis=0)
if method == 'balanced':
centers[n_clusters-1] = n_clusters*mean_X - np.sum(centers[:(n_clusters-1)], axis=0)
return (centers)
|
Initialize k=n_clusters centroids randomly
|
entailment
|
def _assign_clusters(X, centers):
""" Assignment Step:
assign each point to the closet cluster center
"""
dist2cents = scipy.spatial.distance.cdist(X, centers, metric='seuclidean')
membs = np.argmin(dist2cents, axis=1)
return(membs)
|
Assignment Step:
assign each point to the closest cluster center
|
entailment
|
def _cal_dist2center(X, center):
""" Calculate the SSE to the cluster center
"""
dmemb2cen = scipy.spatial.distance.cdist(X, center.reshape(1,X.shape[1]), metric='seuclidean')
return(np.sum(dmemb2cen))
|
Calculate the SSE to the cluster center
|
entailment
|
def _update_centers(X, membs, n_clusters):
    """ Update step: each center becomes the mean of its members' feature
        vectors; an empty cluster is re-seeded with one random sample.
        Also returns the per-cluster SSE.
    """
    centers = np.empty(shape=(n_clusters, X.shape[1]), dtype=float)
    sse = np.empty(shape=n_clusters, dtype=float)
    for label in range(n_clusters):
        members = np.where(membs == label)[0]
        if members.shape[0] == 0:
            # re-seed an empty cluster with a random data point
            members = np.random.choice(X.shape[0], size=1)
        cluster_points = X[members, :]
        centers[label, :] = np.mean(cluster_points, axis=0)
        sse[label] = _cal_dist2center(cluster_points, centers[label, :])
    return(centers, sse)
|
Update Cluster Centers:
calculate the mean of feature vectors for each cluster
|
entailment
|
def _kmeans_run(X, n_clusters, max_iter, tol):
    """ Run a single trial of k-means clustering
        on dataset X with the given number of clusters.

        Alternates assignment and center-update steps until the total SSE
        change falls below ``tol`` or ``max_iter`` iterations are reached.

        Returns (centers, membs, sse_total, sse_arr, n_iter).
    """
    membs = np.empty(shape=X.shape[0], dtype=int)
    centers = _kmeans_init(X, n_clusters)
    sse_last = 9999.9
    # ensure all returned values are defined even if the loop never runs
    sse_total = sse_last
    sse_arr = np.zeros(shape=n_clusters, dtype=float)
    n_iter = 0
    # range(1, max_iter) previously performed only max_iter-1 iterations
    for it in range(1, max_iter + 1):
        membs = _assign_clusters(X, centers)
        centers, sse_arr = _update_centers(X, membs, n_clusters)
        sse_total = np.sum(sse_arr)
        # record the iteration count even when convergence is not reached
        n_iter = it
        if np.abs(sse_total - sse_last) < tol:
            break
        sse_last = sse_total
    return(centers, membs, sse_total, sse_arr, n_iter)
|
Run a single trial of k-means clustering
on dataset X, and given number of clusters
|
entailment
|
def _kmeans(X, n_clusters, max_iter, n_trials, tol):
    """ Run multiple random-restart trials of k-means and keep the
        solution with the lowest total SSE.
        Returns (centers, labels, sse_arr, n_iter) of the best trial.
    """
    for trial in range(n_trials):
        centers, labels, sse_tot, sse_arr, n_iter = \
            _kmeans_run(X, n_clusters, max_iter, tol)
        # the first trial always seeds the running best
        if trial == 0 or sse_tot < sse_tot_best:
            sse_tot_best = sse_tot
            sse_arr_best = sse_arr
            n_iter_best = n_iter
            centers_best = centers.copy()
            labels_best = labels.copy()
    return(centers_best, labels_best, sse_arr_best, n_iter_best)
|
Run multiple trials of k-means clustering,
and output the best centers and cluster labels
|
entailment
|
def fit(self, X):
    """ Run k-means clustering on dataset X (feature vectors) and store
        centers_, labels_, sse_arr_ and n_iter_ on the estimator.
    """
    result = _kmeans(X, self.n_clusters, self.max_iter,
                     self.n_trials, self.tol)
    self.centers_, self.labels_, self.sse_arr_, self.n_iter_ = result
|
Apply KMeans Clustering
X: dataset with feature vectors
|
entailment
|
def _cut_tree(tree, n_clusters, membs):
    """ Cut the cluster hierarchy tree to obtain n_clusters clusters.
        Requires 2 <= n_clusters <= number of leaves.
        Returns (conv_membs, cut_centers): converted memberships and a
        dict of cluster centers keyed by cluster label.
    """
    ## starting from root,
    ## a node is added to the cut_set or
    ## its children are added to node_set
    assert(n_clusters >= 2)
    assert(n_clusters <= len(tree.leaves()))
    cut_centers = dict() #np.empty(shape=(n_clusters, ndim), dtype=float)
    for i in range(n_clusters-1):
        if i==0:
            # first pass: start the search from the root's children
            search_set = set(tree.children(0))
            node_set,cut_set = set(), set()
        else:
            # later passes: re-examine everything collected so far
            search_set = node_set.union(cut_set)
            node_set,cut_set = set(), set()
        if i+2 == n_clusters:
            # final pass: whatever remains forms the cut
            cut_set = search_set
        else:
            for _ in range(len(search_set)):
                n = search_set.pop()
                if n.data['ilev'] is None or n.data['ilev']>i+2:
                    # node not split at (or below) this level: keep in the cut
                    cut_set.add(n)
                else:
                    nid = n.identifier
                    if n.data['ilev']-2==i:
                        # node splits exactly at this level: descend to children
                        node_set = node_set.union(set(tree.children(nid)))
    conv_membs = membs.copy()
    for node in cut_set:
        nid = node.identifier
        label = node.data['label']
        cut_centers[label] = node.data['center']
        sub_leaves = tree.leaves(nid)
        for leaf in sub_leaves:
            # NOTE(review): `leaf` is a treelib Node object; comparing it to
            # the labels stored in conv_membs looks like it should use
            # leaf.identifier -- confirm against callers
            indx = np.where(conv_membs == leaf)[0]
            conv_membs[indx] = nid
    return(conv_membs, cut_centers)
|
Cut the tree to get desired number of clusters as n_clusters
2 <= n_desired <= n_clusters
|
entailment
|
def _add_tree_node(tree, label, ilev, X=None, size=None, center=None, sse=None, parent=None):
    """ Add a cluster node to the hierarchy tree (a root if parent is None).
        Each node's data records the cluster's properties:
          size   -> number of points in the cluster
          center -> cluster mean
          label  -> cluster label
          sse    -> sum-squared-error of this single cluster
          ilev   -> level at which this node is split into 2 children
        size/center/sse are derived from X when not supplied.
    """
    if size is None:
        size = X.shape[0]
    if center is None:
        center = np.mean(X, axis=0)
    if sse is None:
        sse = _kmeans._cal_dist2center(X, center)
    datadict = {
        'size'  : size,
        'center': list(center),
        'label' : label,
        'sse'   : sse,
        'ilev'  : None
    }
    if parent is None:
        tree.create_node(label, label, data=datadict)
    else:
        tree.create_node(label, label, parent=parent, data=datadict)
        # record on the parent the level at which it was split
        tree.get_node(parent).data['ilev'] = ilev
    return(tree)
|
Add a node to the tree
if parent is not known, the node is a root
The nodes of this tree keep properties of each cluster/subcluster:
size --> cluster size as the number of points in the cluster
center --> mean of the cluster
label --> cluster label
sse --> sum-squared-error for that single cluster
ilev --> the level at which this node is split into 2 children
|
entailment
|
def _bisect_kmeans(X, n_clusters, n_trials, max_iter, tol):
    """ Apply Bisecting Kmeans clustering
        to reach n_clusters number of clusters:
        repeatedly pick a cluster (via _select_cluster_2_split) and split
        it in two with 2-means, recording the hierarchy in a tree.
        Returns (centers, membs, sse_arr, tree); centers and sse_arr are
        dicts keyed by leaf labels.
    """
    membs = np.empty(shape=X.shape[0], dtype=int)
    centers = dict() #np.empty(shape=(n_clusters,X.shape[1]), dtype=float)
    sse_arr = dict() #-1.0*np.ones(shape=n_clusters, dtype=float)
    ## data structure to store cluster hierarchies
    tree = treelib.Tree()
    tree = _add_tree_node(tree, 0, ilev=0, X=X)
    km = _kmeans.KMeans(n_clusters=2, n_trials=n_trials, max_iter=max_iter, tol=tol)
    for i in range(1,n_clusters):
        sel_clust_id,sel_memb_ids = _select_cluster_2_split(membs, tree)
        X_sub = X[sel_memb_ids,:]
        km.fit(X_sub)
        #print("Bisecting Step %d :"%i, sel_clust_id, km.sse_arr_, km.centers_)
        ## Updating the clusters & properties
        #sse_arr[[sel_clust_id,i]] = km.sse_arr_
        #centers[[sel_clust_id,i]] = km.centers_
        # the two halves of the split get global labels 2*i-1 and 2*i
        tree = _add_tree_node(tree, 2*i-1, i, \
                             size=np.sum(km.labels_ == 0), center=km.centers_[0], \
                             sse=km.sse_arr_[0], parent= sel_clust_id)
        tree = _add_tree_node(tree, 2*i, i, \
                             size=np.sum(km.labels_ == 1), center=km.centers_[1], \
                             sse=km.sse_arr_[1], parent= sel_clust_id)
        pred_labels = km.labels_
        # map local 2-means labels {0,1} onto the new global labels;
        # relabel 1 first so the two assignments do not collide
        pred_labels[np.where(pred_labels == 1)[0]] = 2*i
        pred_labels[np.where(pred_labels == 0)[0]] = 2*i - 1
        #if sel_clust_id == 1:
        #    pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id
        #    pred_labels[np.where(pred_labels == 1)[0]] = i
        #else:
        #    pred_labels[np.where(pred_labels == 1)[0]] = i
        #    pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id
        membs[sel_memb_ids] = pred_labels
    # collect the final clusters from the tree's leaves
    for n in tree.leaves():
        label = n.data['label']
        centers[label] = n.data['center']
        sse_arr[label] = n.data['sse']
    return(centers, membs, sse_arr, tree)
|
Apply Bisecting Kmeans clustering
to reach n_clusters number of clusters
|
entailment
|
def dic(self):
    r""" Returns the corrected Deviance Information Criterion (DIC) for all chains loaded into ChainConsumer.
    If a chain does not have a posterior, this method will return `None` for that chain. **Note that
    the DIC metric is only valid on posterior surfaces which closely resemble multivariate normals!**
    Formally, we follow Liddle (2007) and first define *Bayesian complexity* as
    .. math::
        p_D = \bar{D}(\theta) - D(\bar{\theta}),
    where :math:`D(\theta) = -2\ln(P(\theta)) + C` is the deviance, where :math:`P` is the posterior
    and :math:`C` a constant. From here the DIC is defined as
    .. math::
        DIC \equiv D(\bar{\theta}) + 2p_D = \bar{D}(\theta) + p_D.
    Returns
    -------
    list[float]
        A list of all the DIC values - one per chain, in the order in which the chains were added.
    References
    ----------
    [1] Andrew R. Liddle, "Information criteria for astrophysical model selection", MNRAS (2007)
    """
    dics = []
    dics_bool = []
    for i, chain in enumerate(self.parent.chains):
        p = chain.posterior
        if p is None:
            dics_bool.append(False)
            # NOTE(review): Logger.warn is a deprecated alias of Logger.warning
            self._logger.warn("You need to set the posterior for chain %s to get the DIC" % chain.name)
        else:
            dics_bool.append(True)
            num_params = chain.chain.shape[1]
            # weighted posterior mean of each parameter
            means = np.array([np.average(chain.chain[:, ii], weights=chain.weights) for ii in range(num_params)])
            # deviance samples (up to the additive constant C)
            d = -2 * p
            # deviance at the posterior mean, via nearest-sample interpolation
            d_of_mean = griddata(chain.chain, d, means, method='nearest')[0]
            mean_d = np.average(d, weights=chain.weights)
            # Bayesian complexity p_D
            p_d = mean_d - d_of_mean
            dic = mean_d + p_d
            dics.append(dic)
    if len(dics) > 0:
        # list -= numpy scalar coerces `dics` to an ndarray; values become
        # relative to the best (lowest-DIC) chain
        dics -= np.min(dics)
    dics_fin = []
    i = 0
    # re-expand to one entry per chain, None where the DIC was unavailable
    for b in dics_bool:
        if not b:
            dics_fin.append(None)
        else:
            dics_fin.append(dics[i])
            i += 1
    return dics_fin
|
r""" Returns the corrected Deviance Information Criterion (DIC) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, this method will return `None` for that chain. **Note that
the DIC metric is only valid on posterior surfaces which closely resemble multivariate normals!**
Formally, we follow Liddle (2007) and first define *Bayesian complexity* as
.. math::
p_D = \bar{D}(\theta) - D(\bar{\theta}),
where :math:`D(\theta) = -2\ln(P(\theta)) + C` is the deviance, where :math:`P` is the posterior
and :math:`C` a constant. From here the DIC is defined as
.. math::
DIC \equiv D(\bar{\theta}) + 2p_D = \bar{D}(\theta) + p_D.
Returns
-------
list[float]
A list of all the DIC values - one per chain, in the order in which the chains were added.
References
----------
[1] Andrew R. Liddle, "Information criteria for astrophysical model selection", MNRAS (2007)
|
entailment
|
def bic(self):
    r""" Returns the corrected Bayesian Information Criterion (BIC) for all chains loaded into ChainConsumer.

    A chain missing its posterior, number of effective data points, or
    number of free parameters yields `None`. Formally,

    .. math::
        BIC \equiv -2\ln(P) + k \ln(N),

    where :math:`P` is the posterior, :math:`k` the number of model
    parameters and :math:`N` the number of independent data points used
    in the fit. Values are reported relative to the smallest BIC.

    Returns
    -------
    list[float]
        A list of all the BIC values - one per chain, in the order in which the chains were added.
    """
    bics = []
    have_all = []
    for chain in self.parent.chains:
        p = chain.posterior
        n_data = chain.num_eff_data_points
        n_free = chain.num_free_params
        if p is None or n_data is None or n_free is None:
            have_all.append(False)
            missing = ", ".join(name for name, val in
                                (("posterior", p),
                                 ("num_eff_data_points", n_data),
                                 ("num_free_params", n_free)) if val is None)
            self._logger.warn("You need to set %s for chain %s to get the BIC" %
                              (missing, chain.name))
        else:
            have_all.append(True)
            bics.append(n_free * np.log(n_data) - 2 * np.max(p))
    if bics:
        # report BICs relative to the best model
        bics -= np.min(bics)
    # re-expand to one entry per chain, None where the BIC was unavailable
    result = []
    values = iter(bics)
    for ok in have_all:
        result.append(next(values) if ok else None)
    return result
|
r""" Returns the corrected Bayesian Information Criterion (BIC) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, number of data points, and number of free parameters
loaded, this method will return `None` for that chain. Formally, the BIC is defined as
.. math::
BIC \equiv -2\ln(P) + k \ln(N),
where :math:`P` represents the posterior, :math:`k` the number of model parameters and :math:`N`
the number of independent data points used in the model fitting.
Returns
-------
list[float]
A list of all the BIC values - one per chain, in the order in which the chains were added.
|
entailment
|
def aic(self):
    r""" Returns the corrected Akaike Information Criterion (AICc) for all chains loaded into ChainConsumer.

    A chain missing its posterior, number of effective data points, or
    number of free parameters yields `None`. Formally,

    .. math::
        AIC \equiv -2\ln(P) + 2k,

    where :math:`P` is the posterior and :math:`k` the number of model
    parameters. The finite-sample correction is

    .. math::
        AIC_c \equiv AIC + \frac{2k(k+1)}{N-k-1},

    with :math:`N` the number of independent data points used in the fit.
    Values are reported relative to the smallest AICc.

    Returns
    -------
    list[float]
        A list of all the AICc values - one per chain, in the order in which the chains were added.
    """
    aics = []
    have_all = []
    for chain in self.parent.chains:
        p = chain.posterior
        n_data = chain.num_eff_data_points
        n_free = chain.num_free_params
        if p is None or n_data is None or n_free is None:
            have_all.append(False)
            missing = ", ".join(name for name, val in
                                (("posterior", p),
                                 ("num_eff_data_points", n_data),
                                 ("num_free_params", n_free)) if val is None)
            self._logger.warn("You need to set %s for chain %s to get the AIC" %
                              (missing, chain.name))
        else:
            have_all.append(True)
            # finite-sample correction term
            c_cor = (1.0 * n_free * (n_free + 1) / (n_data - n_free - 1))
            aics.append(2.0 * (n_free + c_cor - np.max(p)))
    if aics:
        # report AICs relative to the best model
        aics -= np.min(aics)
    # re-expand to one entry per chain, None where the AIC was unavailable
    result = []
    values = iter(aics)
    for ok in have_all:
        result.append(next(values) if ok else None)
    return result
|
r""" Returns the corrected Akaike Information Criterion (AICc) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, number of data points, and number of free parameters
loaded, this method will return `None` for that chain. Formally, the AIC is defined as
.. math::
AIC \equiv -2\ln(P) + 2k,
where :math:`P` represents the posterior, and :math:`k` the number of model parameters. The AICc
is then defined as
.. math::
AIC_c \equiv AIC + \frac{2k(k+1)}{N-k-1},
where :math:`N` represents the number of independent data points used in the model fitting.
The AICc is a correction for the AIC to take into account finite chain sizes.
Returns
-------
list[float]
A list of all the AICc values - one per chain, in the order in which the chains were added.
|
entailment
|
def comparison_table(self, caption=None, label="tab:model_comp", hlines=True,
                     aic=True, bic=True, dic=True, sort="bic", descending=True):  # pragma: no cover
    """
    Return a LaTeX ready table of model comparisons.
    Parameters
    ----------
    caption : str, optional
        The table caption to insert.
    label : str, optional
        The table label to insert.
    hlines : bool, optional
        Whether to insert hlines in the table or not.
    aic : bool, optional
        Whether to include a column for AICc or not.
    bic : bool, optional
        Whether to include a column for BIC or not.
    dic : bool, optional
        Whether to include a column for DIC or not.
    sort : str, optional
        How to sort the models. Should be one of "bic", "aic" or "dic".
    descending : bool, optional
        The sort order.
    Returns
    -------
    str
        A LaTeX table to be copied into your document.
    """
    if sort == "bic":
        assert bic, "You cannot sort by BIC if you turn it off"
    if sort == "aic":
        assert aic, "You cannot sort by AIC if you turn it off"
    if sort == "dic":
        assert dic, "You cannot sort by DIC if you turn it off"
    if caption is None:
        caption = ""
    if label is None:
        label = ""
    base_string = get_latex_table_frame(caption, label)
    end_text = " \\\\ \n"
    # one column per enabled criterion (previously the DIC column was not
    # counted, producing a spurious extra column when dic=False)
    num_cols = (1 if aic else 0) + (1 if bic else 0) + (1 if dic else 0)
    column_text = "c" * (num_cols + 1)
    center_text = ""
    hline_text = "\\hline\n"
    if hlines:
        center_text += hline_text
    center_text += "\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else "") \
                   + (" & DIC " if dic else "") + end_text
    if hlines:
        center_text += "\t" + hline_text
    aics = self.aic() if aic else np.zeros(len(self.parent.chains))
    bics = self.bic() if bic else np.zeros(len(self.parent.chains))
    dics = self.dic() if dic else np.zeros(len(self.parent.chains))
    if sort == "bic":
        to_sort = bics
    elif sort == "aic":
        to_sort = aics
    elif sort == "dic":
        to_sort = dics
    else:
        raise ValueError("sort %s not recognised, must be bic, aic or dic" % sort)
    # drop chains for which the sort criterion is unavailable
    good = [i for i, t in enumerate(to_sort) if t is not None]
    names = [self.parent.chains[g].name for g in good]
    aics = [aics[g] for g in good]
    bics = [bics[g] for g in good]
    # previously dics was left unfiltered, misaligning rows after sorting
    dics = [dics[g] for g in good]
    if sort == "bic":
        to_sort = bics
    elif sort == "aic":
        to_sort = aics
    else:
        # previously sort="dic" silently fell back to sorting by AIC
        to_sort = dics
    indexes = np.argsort(to_sort)
    if descending:
        indexes = indexes[::-1]
    for i in indexes:
        line = "\t" + names[i]
        if aic:
            line += " & %5.1f " % aics[i]
        if bic:
            line += " & %5.1f " % bics[i]
        if dic:
            line += " & %5.1f " % dics[i]
        line += end_text
        center_text += line
    if hlines:
        center_text += "\t" + hline_text
    return base_string % (column_text, center_text)
|
Return a LaTeX ready table of model comparisons.
Parameters
----------
caption : str, optional
The table caption to insert.
label : str, optional
The table label to insert.
hlines : bool, optional
Whether to insert hlines in the table or not.
aic : bool, optional
Whether to include a column for AICc or not.
bic : bool, optional
Whether to include a column for BIC or not.
dic : bool, optional
Whether to include a column for DIC or not.
sort : str, optional
How to sort the models. Should be one of "bic", "aic" or "dic".
descending : bool, optional
The sort order.
Returns
-------
str
A LaTeX table to be copied into your document.
|
entailment
|
def evaluate(self, data):
    """ Estimate un-normalised probability density at target points
    Parameters
    ----------
    data : np.ndarray
        A `(num_targets, num_dim)` array of points to investigate.
    Returns
    -------
    np.ndarray
        A `(num_targets)` length array of estimates
    Returns array of probability densities
    """
    if len(data.shape) == 1 and self.num_dim == 1:
        # promote a flat vector of 1D targets to shape (num_targets, 1)
        data = np.atleast_2d(data).T
    # transform targets into the centred space the KD-tree was built in;
    # assumes self.A is that linear (whitening) map -- TODO confirm
    _d = np.dot(data - self.mean, self.A)
    # Get all points within range of kernels
    neighbors = self.tree.query_ball_point(_d, self.sigma * self.truncation)
    out = []
    for i, n in enumerate(neighbors):
        if len(n) >= self.nmin:
            diff = self.d[n, :] - _d[i]
            distsq = np.sum(diff * diff, axis=1)
        else:
            # If too few points get nmin closest
            dist, n = self.tree.query(_d[i], k=self.nmin)
            distsq = dist * dist
        # weighted Gaussian-kernel sum; sigma_fact is presumably the
        # negative inverse bandwidth factor (-1/(2*sigma^2)) -- verify
        out.append(np.sum(self.weights[n] * np.exp(self.sigma_fact * distsq)))
    return np.array(out)
|
Estimate un-normalised probability density at target points
Parameters
----------
data : np.ndarray
A `(num_targets, num_dim)` array of points to investigate.
Returns
-------
np.ndarray
A `(num_targets)` length array of estimates
Returns array of probability densities
|
entailment
|
def plot(self, figsize="GROW", parameters=None, chains=None, extents=None, filename=None,
         display=False, truth=None, legend=None, blind=None, watermark=None):  # pragma: no cover
    """ Plot the chain!

    Parameters
    ----------
    figsize : str|tuple(float)|float, optional
        The figure size to generate. Accepts a regular two tuple of size in inches,
        or one of several key words. The default value of ``COLUMN`` creates a figure
        of appropriate size of insertion into an A4 LaTeX document in two-column mode.
        ``PAGE`` creates a full page width figure. ``GROW`` creates an image that
        scales with parameters (1.5 inches per parameter). String arguments are not
        case sensitive. If you pass a float, it will scale the default ``GROW`` by
        that amount, so ``2.0`` would result in a plot 3 inches per parameter.
    parameters : list[str]|int, optional
        If set, only creates a plot for those specific parameters (if list). If an
        integer is given, only plots the first so many parameters.
    chains : int|str, list[str|int], optional
        Used to specify which chain to show if more than one chain is loaded in.
        Can be an integer, specifying the
        chain index, or a str, specifying the chain name.
    extents : list[tuple[float]] or dict[str], optional
        Extents are given as two-tuples. You can pass in a list the same size as
        parameters (or default parameters if you don't specify parameters),
        or as a dictionary.
    filename : str, optional
        If set, saves the figure to this location
    display : bool, optional
        If True, shows the figure using ``plt.show()``.
    truth : list[float] or dict[str], optional
        A list of truth values corresponding to parameters, or a dictionary of
        truth values indexed by key
    legend : bool, optional
        If true, creates a legend in your plot using the chain names.
    blind : bool|string|list[string], optional
        Whether to blind axes values. Can be set to `True` to blind all parameters,
        or can pass in a string (or list of strings) which specify the parameters to blind.
    watermark : str, optional
        A watermark to add to the figure

    Returns
    -------
    figure
        the matplotlib figure
    """
    # Normalise user input (chain selection, parameter subset, truth/extent
    # dicts, blinding) into canonical forms.
    chains, parameters, truth, extents, blind = self._sanitise(chains, parameters, truth,
                                                               extents, color_p=True, blind=blind)
    names = [chain.name for chain in chains]
    # Default: only show a legend when more than one chain is plotted.
    if legend is None:
        legend = len(chains) > 1
    # If no chains have names, don't plot the legend
    legend = legend and len([n for n in names if n]) > 0
    # Calculate cmap extents
    unique_color_params = list(set([c.config["color_params"] for c in chains if c.config["color_params"] is not None]))
    num_cax = len(unique_color_params)
    color_param_extents = {}
    for u in unique_color_params:
        umin, umax = np.inf, -np.inf
        for chain in chains:
            if chain.config["color_params"] == u:
                data = chain.get_color_data()
                if data is not None:
                    umin = min(umin, data.min())
                    umax = max(umax, data.max())
        color_param_extents[u] = (umin, umax)
    # Resolve the figsize shorthand into an (inches, inches) tuple.
    grow_size = 1.5
    if isinstance(figsize, float):
        # A float scales the GROW per-parameter size.
        grow_size *= figsize
        figsize = "GROW"
    if isinstance(figsize, str):
        if figsize.upper() == "COLUMN":
            figsize = (5 + (1 if num_cax > 0 else 0), 5)
        elif figsize.upper() == "PAGE":
            figsize = (10, 10)
        elif figsize.upper() == "GROW":
            figsize = (grow_size * len(parameters) + num_cax * 1.0, grow_size * len(parameters))
        else:
            raise ValueError("Unknown figure size %s" % figsize)
    elif isinstance(figsize, float):
        # NOTE(review): unreachable — a float figsize is converted to "GROW"
        # above, so this branch can never trigger.
        figsize = (figsize * grow_size * len(parameters), figsize * grow_size * len(parameters))
    plot_hists = self.parent.config["plot_hists"]
    # "Flip" layout only makes sense for exactly two parameters with histograms.
    flip = (len(parameters) == 2 and plot_hists and self.parent.config["flip"])
    fig, axes, params1, params2, extents = self._get_figure(parameters, chains=chains, figsize=figsize, flip=flip,
                                                            external_extents=extents, blind=blind)
    axl = axes.ravel().tolist()
    summary = self.parent.config["summary"]
    if summary is None:
        # Only annotate summaries for a single chain with few parameters.
        summary = len(parameters) < 5 and len(self.parent.chains) == 1
    if len(chains) == 1:
        self._logger.debug("Plotting surfaces for chain of dimension %s" %
                           (chains[0].chain.shape,))
    else:
        self._logger.debug("Plotting surfaces for %d chains" % len(chains))
    cbar_done = []
    # Group the point-style chains by name so shared markers can be reused.
    chain_points = [c for c in chains if c.config["plot_point"]]
    num_chain_points = len(chain_points)
    if num_chain_points:
        subgroup_names = list(set([c.name for c in chain_points]))
        subgroups = [[c for c in chain_points if c.name == n] for n in subgroup_names]
        markers = [group[0].config["marker_style"] for group in subgroups]  # Only one marker per group
        marker_sizes = [[g.config["marker_size"] for g in group] for group in subgroups]  # But size can diff
        marker_alphas = [group[0].config["marker_alpha"] for group in subgroups]  # Only one marker per group
    # Walk the lower triangle of the grid: diagonal gets 1D histograms,
    # off-diagonal gets 2D contours.
    for i, p1 in enumerate(params1):
        for j, p2 in enumerate(params2):
            if i < j:
                continue
            ax = axes[i, j]
            do_flip = (flip and i == len(params1) - 1)
            # Plot the histograms
            if plot_hists and i == j:
                if do_flip:
                    self._add_truth(ax, truth, p1)
                else:
                    self._add_truth(ax, truth, None, py=p2)
                max_val = None
                # Plot each chain
                for chain in chains:
                    if p1 not in chain.parameters:
                        continue
                    if not chain.config["plot_contour"]:
                        continue
                    param_summary = summary and p1 not in blind
                    m = self._plot_bars(ax, p1, chain, flip=do_flip, summary=param_summary)
                    if max_val is None or m > max_val:
                        max_val = m
                if num_chain_points and self.parent.config["global_point"]:
                    m = self._plot_point_histogram(ax, subgroups, p1, flip=do_flip)
                    if max_val is None or m > max_val:
                        max_val = m
                # Leave 10% headroom above the tallest histogram.
                if max_val is not None:
                    if do_flip:
                        ax.set_xlim(0, 1.1 * max_val)
                    else:
                        ax.set_ylim(0, 1.1 * max_val)
            else:
                for chain in chains:
                    if p1 not in chain.parameters or p2 not in chain.parameters:
                        continue
                    if not chain.config["plot_contour"]:
                        continue
                    h = None
                    if p1 in chain.parameters and p2 in chain.parameters:
                        h = self._plot_contour(ax, chain, p1, p2, color_extents=color_param_extents)
                    cp = chain.config["color_params"]
                    # Add one colorbar per unique color parameter.
                    if h is not None and cp is not None and cp not in cbar_done:
                        cbar_done.append(cp)
                        aspect = figsize[1] / 0.15
                        fraction = 0.85 / figsize[0]
                        cbar = fig.colorbar(h, ax=axl, aspect=aspect, pad=0.03, fraction=fraction, drawedges=False)
                        label = cp
                        if label == "weights":
                            label = "Weights"
                        elif label == "log_weights":
                            label = "log(Weights)"
                        elif label == "posterior":
                            label = "log(Posterior)"
                        cbar.set_label(label, fontsize=14)
                        cbar.solids.set(alpha=1)
                if num_chain_points:
                    self._plot_points(ax, subgroups, markers, marker_sizes, marker_alphas, p1, p2)
                self._add_truth(ax, truth, p1, py=p2)
    # Collect per-chain style config for legend construction below.
    colors = [c.config["color"] for c in chains]
    plot_points = [c.config["plot_point"] for c in chains]
    plot_contours = [c.config["plot_contour"] for c in chains]
    linestyles = [c.config["linestyle"] for c in chains]
    linewidths = [c.config["linewidth"] for c in chains]
    marker_styles = [c.config["marker_style"] for c in chains]
    marker_sizes = [c.config["marker_size"] for c in chains]
    legend_kwargs = self.parent.config["legend_kwargs"]
    legend_artists = self.parent.config["legend_artists"]
    legend_color_text = self.parent.config["legend_color_text"]
    legend_location = self.parent.config["legend_location"]
    if legend_location is None:
        if not flip or len(parameters) > 2:
            legend_location = (0, -1)
        else:
            legend_location = (-1, 0)
    outside = (legend_location[0] >= legend_location[1])
    if names is not None and legend:
        ax = axes[legend_location[0], legend_location[1]]
        if "markerfirst" not in legend_kwargs:
            # If we have legend inside a used subplot, switch marker order
            legend_kwargs["markerfirst"] = outside or not legend_artists
        linewidths2 = linewidths if legend_artists else [0]*len(linewidths)
        linestyles2 = linestyles if legend_artists else ["-"]*len(linestyles)
        marker_sizes2 = marker_sizes if legend_artists else [0]*len(linestyles)
        artists = []
        done_names = []
        final_colors = []
        # Build one proxy Line2D artist per uniquely-named chain.
        for i, (n, c, ls, lw, marker, size, pp, pc) in enumerate(zip(names, colors, linestyles2, linewidths2,
                                                                     marker_styles, marker_sizes2, plot_points, plot_contours)):
            if n is None or n in done_names:
                continue
            done_names.append(n)
            final_colors.append(c)
            size = np.sqrt(size)  # plot vs scatter use size differently, hence the sqrt
            if pc and not pp:
                artists.append(plt.Line2D((0, 1), (0, 0), color=c, ls=ls, lw=lw))
            elif not pc and pp:
                artists.append(plt.Line2D((0, 1), (0, 0), color=c, ls=ls, lw=0, marker=marker, markersize=size))
            else:
                artists.append(plt.Line2D((0, 1), (0, 0), color=c, ls=ls, lw=lw, marker=marker, markersize=size))
        leg = ax.legend(artists, done_names, **legend_kwargs)
        if legend_color_text:
            for text, c in zip(leg.get_texts(), final_colors):
                text.set_weight("medium")
                text.set_color(c)
        if not outside:
            loc = legend_kwargs.get("loc") or ""
            if "right" in loc.lower():
                # NOTE(review): reaches into matplotlib legend internals to
                # right-align the text column — fragile across versions.
                vp = leg._legend_box._children[-1]._children[0]
                vp.align = "right"
    # Force a draw so offset (scientific-notation) texts exist, then fold them
    # into the axis labels and hide the originals.
    fig.canvas.draw()
    for ax in axes[-1, :]:
        offset = ax.get_xaxis().get_offset_text()
        ax.set_xlabel('{0} {1}'.format(ax.get_xlabel(), "[{0}]".format(offset.get_text()) if offset.get_text() else ""))
        offset.set_visible(False)
    for ax in axes[:, 0]:
        offset = ax.get_yaxis().get_offset_text()
        ax.set_ylabel('{0} {1}'.format(ax.get_ylabel(), "[{0}]".format(offset.get_text()) if offset.get_text() else ""))
        offset.set_visible(False)
    dpi = 300
    if watermark:
        if flip and len(parameters) == 2:
            ax = axes[-1, 0]
        else:
            ax = None
        self._add_watermark(fig, ax, figsize, watermark, dpi=dpi)
    if filename is not None:
        if isinstance(filename, str):
            filename = [filename]
        for f in filename:
            self._save_fig(fig, f, dpi)
    if display:
        plt.show()
    return fig
|
Plot the chain!
Parameters
----------
figsize : str|tuple(float)|float, optional
The figure size to generate. Accepts a regular two tuple of size in inches,
or one of several key words. The default value of ``COLUMN`` creates a figure
of appropriate size of insertion into an A4 LaTeX document in two-column mode.
``PAGE`` creates a full page width figure. ``GROW`` creates an image that
scales with parameters (1.5 inches per parameter). String arguments are not
case sensitive. If you pass a float, it will scale the default ``GROW`` by
that amount, so ``2.0`` would result in a plot 3 inches per parameter.
parameters : list[str]|int, optional
If set, only creates a plot for those specific parameters (if list). If an
integer is given, only plots the first so many parameters.
chains : int|str, list[str|int], optional
Used to specify which chain to show if more than one chain is loaded in.
Can be an integer, specifying the
chain index, or a str, specifying the chain name.
extents : list[tuple[float]] or dict[str], optional
Extents are given as two-tuples. You can pass in a list the same size as
parameters (or default parameters if you don't specify parameters),
or as a dictionary.
filename : str, optional
If set, saves the figure to this location
display : bool, optional
If True, shows the figure using ``plt.show()``.
truth : list[float] or dict[str], optional
A list of truth values corresponding to parameters, or a dictionary of
truth values indexed by key
legend : bool, optional
If true, creates a legend in your plot using the chain names.
blind : bool|string|list[string], optional
Whether to blind axes values. Can be set to `True` to blind all parameters,
or can pass in a string (or list of strings) which specify the parameters to blind.
watermark : str, optional
A watermark to add to the figure
Returns
-------
figure
the matplotlib figure
|
entailment
|
def plot_walks(self, parameters=None, truth=None, extents=None, display=False,
               filename=None, chains=None, convolve=None, figsize=None,
               plot_weights=True, plot_posterior=True, log_weight=None):  # pragma: no cover
    """ Plots the chain walk; the parameter values as a function of step index.

    This plot is more for a sanity or consistency check than for use with final results.
    Plotting this before plotting with :func:`plot` allows you to quickly see if the
    chains are well behaved, or if certain parameters are suspect
    or require a greater burn in period.

    The desired outcome is to see an unchanging distribution along the x-axis of the plot.
    If there are obvious tails or features in the parameters, you probably want
    to investigate.

    Parameters
    ----------
    parameters : list[str]|int, optional
        Specify a subset of parameters to plot. If not set, all parameters are plotted.
        If an integer is given, only the first so many parameters are plotted.
    truth : list[float]|dict[str], optional
        A list of truth values corresponding to parameters, or a dictionary of
        truth values keyed by the parameter.
    extents : list[tuple]|dict[str], optional
        A list of two-tuples for plot extents per parameter, or a dictionary of
        extents keyed by the parameter.
    display : bool, optional
        If set, shows the plot using ``plt.show()``
    filename : str, optional
        If set, saves the figure to the filename
    chains : int|str, list[str|int], optional
        Used to specify which chain to show if more than one chain is loaded in.
        Can be an integer, specifying the
        chain index, or a str, specifying the chain name.
    convolve : int, optional
        If set, overplots a smoothed version of the steps using ``convolve`` as
        the width of the smoothing filter.
    figsize : tuple, optional
        If set, sets the created figure size.
    plot_weights : bool, optional
        If true, plots the weight if they are available
    plot_posterior : bool, optional
        If true, plots the log posterior if they are available
    log_weight : bool, optional
        Whether to display weights in log space or not. If None, the value is
        inferred by the mean weights of the plotted chains.

    Returns
    -------
    figure
        the matplotlib figure created
    """
    chains, parameters, truth, extents, _ = self._sanitise(chains, parameters, truth, extents)
    n = len(parameters)
    # `extra` counts the diagnostic rows (weights, posterior) stacked above
    # the parameter rows.
    extra = 0
    if plot_weights:
        # Only plot weights when at least one chain has non-trivial weights.
        plot_weights = plot_weights and np.any([np.any(c.weights != 1.0) for c in chains])
    plot_posterior = plot_posterior and np.any([c.posterior is not None for c in chains])
    if plot_weights:
        extra += 1
    if plot_posterior:
        extra += 1
    if figsize is None:
        figsize = (8, 0.75 + (n + extra))
    fig, axes = plt.subplots(figsize=figsize, nrows=n + extra, squeeze=False, sharex=True)
    for i, axes_row in enumerate(axes):
        ax = axes_row[0]
        if i >= extra:
            # Parameter rows sit below the diagnostic rows, so offset the row
            # index by `extra` to walk parameters in requested order.
            # BUGFIX: was `parameters[i - n]`, which rotated the parameter
            # ordering whenever extra diagnostic rows were present.
            p = parameters[i - extra]
            for chain in chains:
                if p in chain.parameters:
                    chain_row = chain.get_data(p)
                    self._plot_walk(ax, p, chain_row, extents=extents.get(p), convolve=convolve, color=chain.config["color"])
            if truth.get(p) is not None:
                self._plot_walk_truth(ax, truth.get(p))
        else:
            # Diagnostic rows: posterior first (row 0) if present, then weights.
            if i == 0 and plot_posterior:
                for chain in chains:
                    if chain.posterior is not None:
                        # Shift so the maximum log-posterior is at zero.
                        self._plot_walk(ax, "$\log(P)$", chain.posterior - chain.posterior.max(),
                                        convolve=convolve, color=chain.config["color"])
            else:
                if log_weight is None:
                    # Heuristic: small mean weights are easier to read in log space.
                    log_weight = np.any([chain.weights.mean() < 0.1 for chain in chains])
                if log_weight:
                    for chain in chains:
                        self._plot_walk(ax, r"$\log_{10}(w)$", np.log10(chain.weights),
                                        convolve=convolve, color=chain.config["color"])
                else:
                    for chain in chains:
                        self._plot_walk(ax, "$w$", chain.weights,
                                        convolve=convolve, color=chain.config["color"])
    if filename is not None:
        if isinstance(filename, str):
            filename = [filename]
        for f in filename:
            self._save_fig(fig, f, 300)
    if display:
        plt.show()
    return fig
|
Plots the chain walk; the parameter values as a function of step index.
This plot is more for a sanity or consistency check than for use with final results.
Plotting this before plotting with :func:`plot` allows you to quickly see if the
chains are well behaved, or if certain parameters are suspect
or require a greater burn in period.
The desired outcome is to see an unchanging distribution along the x-axis of the plot.
If there are obvious tails or features in the parameters, you probably want
to investigate.
Parameters
----------
parameters : list[str]|int, optional
Specify a subset of parameters to plot. If not set, all parameters are plotted.
If an integer is given, only the first so many parameters are plotted.
truth : list[float]|dict[str], optional
A list of truth values corresponding to parameters, or a dictionary of
truth values keyed by the parameter.
extents : list[tuple]|dict[str], optional
A list of two-tuples for plot extents per parameter, or a dictionary of
extents keyed by the parameter.
display : bool, optional
If set, shows the plot using ``plt.show()``
filename : str, optional
If set, saves the figure to the filename
chains : int|str, list[str|int], optional
Used to specify which chain to show if more than one chain is loaded in.
Can be an integer, specifying the
chain index, or a str, specifying the chain name.
convolve : int, optional
If set, overplots a smoothed version of the steps using ``convolve`` as
the width of the smoothing filter.
figsize : tuple, optional
If set, sets the created figure size.
plot_weights : bool, optional
If true, plots the weight if they are available
plot_posterior : bool, optional
If true, plots the log posterior if they are available
log_weight : bool, optional
Whether to display weights in log space or not. If None, the value is
inferred by the mean weights of the plotted chains.
Returns
-------
figure
the matplotlib figure created
|
entailment
|
def plot_distributions(self, parameters=None, truth=None, extents=None, display=False,
                       filename=None, chains=None, col_wrap=4, figsize=None, blind=None):  # pragma: no cover
    """ Plots the 1D parameter distributions for verification purposes.

    This plot is more for a sanity or consistency check than for use with final results.
    Plotting this before plotting with :func:`plot` allows you to quickly see if the
    chains give well behaved distributions, or if certain parameters are suspect
    or require a greater burn in period.

    Parameters
    ----------
    parameters : list[str]|int, optional
        Specify a subset of parameters to plot. If not set, all parameters are plotted.
        If an integer is given, only the first so many parameters are plotted.
    truth : list[float]|dict[str], optional
        A list of truth values corresponding to parameters, or a dictionary of
        truth values keyed by the parameter.
    extents : list[tuple]|dict[str], optional
        A list of two-tuples for plot extents per parameter, or a dictionary of
        extents keyed by the parameter.
    display : bool, optional
        If set, shows the plot using ``plt.show()``
    filename : str, optional
        If set, saves the figure to the filename
    chains : int|str, list[str|int], optional
        Used to specify which chain to show if more than one chain is loaded in.
        Can be an integer, specifying the
        chain index, or a str, specifying the chain name.
    col_wrap : int, optional
        How many columns to plot before wrapping.
    figsize : tuple(float)|float, optional
        Either a tuple specifying the figure size or a float scaling factor.
    blind : bool|string|list[string], optional
        Whether to blind axes values. Can be set to `True` to blind all parameters,
        or can pass in a string (or list of strings) which specify the parameters to blind.

    Returns
    -------
    figure
        the matplotlib figure created
    """
    chains, parameters, truth, extents, blind = self._sanitise(chains, parameters, truth, extents, blind=blind)
    n = len(parameters)
    # Grid layout: wrap after col_wrap columns.
    num_cols = min(n, col_wrap)
    num_rows = int(np.ceil(1.0 * n / col_wrap))
    if figsize is None:
        figsize = 1.0
    if isinstance(figsize, float):
        # A float is a scale factor applied to 2 inches per subplot.
        figsize_float = figsize
        figsize = (num_cols * 2 * figsize, num_rows * 2 * figsize)
    else:
        figsize_float = 1.0
    summary = self.parent.config["summary"]
    label_font_size = self.parent.config["label_font_size"]
    tick_font_size = self.parent.config["tick_font_size"]
    max_ticks = self.parent.config["max_ticks"]
    diagonal_tick_labels = self.parent.config["diagonal_tick_labels"]
    if summary is None:
        # Default: only annotate summary statistics for a single chain.
        summary = len(self.parent.chains) == 1
    # Summary titles need extra vertical room between rows.
    hspace = (0.8 if summary else 0.5) / figsize_float
    fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=figsize, squeeze=False)
    fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.1, wspace=0.05, hspace=hspace)
    formatter = ScalarFormatter(useOffset=False)
    formatter.set_powerlimits((-3, 4))
    for i, ax in enumerate(axes.flatten()):
        if i >= len(parameters):
            # More grid cells than parameters: hide the spares.
            ax.set_axis_off()
            continue
        p = parameters[i]
        ax.set_yticks([])
        if p in blind:
            ax.set_xticks([])
        else:
            if diagonal_tick_labels:
                _ = [l.set_rotation(45) for l in ax.get_xticklabels()]
            _ = [l.set_fontsize(tick_font_size) for l in ax.get_xticklabels()]
            ax.xaxis.set_major_locator(MaxNLocator(max_ticks, prune="lower"))
            ax.xaxis.set_major_formatter(formatter)
        ax.set_xlim(extents.get(p) or self._get_parameter_extents(p, chains))
        max_val = None
        for chain in chains:
            if p in chain.parameters:
                param_summary = summary and p not in blind
                m = self._plot_bars(ax, p, chain, summary=param_summary)
                if max_val is None or m > max_val:
                    max_val = m
        self._add_truth(ax, truth, None, py=p)
        # NOTE(review): if no chain contains `p`, max_val stays None and this
        # line would raise — presumably _sanitise guarantees coverage; confirm.
        ax.set_ylim(0, 1.1 * max_val)
        ax.set_xlabel(p, fontsize=label_font_size)
    if filename is not None:
        if isinstance(filename, str):
            filename = [filename]
        for f in filename:
            self._save_fig(fig, f, 300)
    if display:
        plt.show()
    return fig
|
Plots the 1D parameter distributions for verification purposes.
This plot is more for a sanity or consistency check than for use with final results.
Plotting this before plotting with :func:`plot` allows you to quickly see if the
chains give well behaved distributions, or if certain parameters are suspect
or require a greater burn in period.
Parameters
----------
parameters : list[str]|int, optional
Specify a subset of parameters to plot. If not set, all parameters are plotted.
If an integer is given, only the first so many parameters are plotted.
truth : list[float]|dict[str], optional
A list of truth values corresponding to parameters, or a dictionary of
truth values keyed by the parameter.
extents : list[tuple]|dict[str], optional
A list of two-tuples for plot extents per parameter, or a dictionary of
extents keyed by the parameter.
display : bool, optional
If set, shows the plot using ``plt.show()``
filename : str, optional
If set, saves the figure to the filename
chains : int|str, list[str|int], optional
Used to specify which chain to show if more than one chain is loaded in.
Can be an integer, specifying the
chain index, or a str, specifying the chain name.
col_wrap : int, optional
How many columns to plot before wrapping.
figsize : tuple(float)|float, optional
Either a tuple specifying the figure size or a float scaling factor.
blind : bool|string|list[string], optional
Whether to blind axes values. Can be set to `True` to blind all parameters,
or can pass in a string (or list of strings) which specify the parameters to blind.
Returns
-------
figure
the matplotlib figure created
|
entailment
|
def plot_summary(self, parameters=None, truth=None, extents=None, display=False,
                 filename=None, chains=None, figsize=1.0, errorbar=False, include_truth_chain=True,
                 blind=None, watermark=None, extra_parameter_spacing=0.5,
                 vertical_spacing_ratio=1.0, show_names=True):  # pragma: no cover
    """ Plots parameter summaries

    This plot is more for a sanity or consistency check than for use with final results.
    Plotting this before plotting with :func:`plot` allows you to quickly see if the
    chains give well behaved distributions, or if certain parameters are suspect
    or require a greater burn in period.

    Parameters
    ----------
    parameters : list[str]|int, optional
        Specify a subset of parameters to plot. If not set, all parameters are plotted.
        If an integer is given, only the first so many parameters are plotted.
    truth : list[float]|list|list[float]|dict[str]|str, optional
        A list of truth values corresponding to parameters, or a dictionary of
        truth values keyed by the parameter. Each "truth value" can be either a float (will
        draw a vertical line), two floats (a shaded interval) or three floats (min, mean, max),
        which renders as a shaded interval with a line for the mean. Or, supply a string
        which matches a chain name, and the results for that chain will be used as the 'truth'
    extents : list[tuple]|dict[str], optional
        A list of two-tuples for plot extents per parameter, or a dictionary of
        extents keyed by the parameter.
    display : bool, optional
        If set, shows the plot using ``plt.show()``
    filename : str, optional
        If set, saves the figure to the filename
    chains : int|str, list[str|int], optional
        Used to specify which chain to show if more than one chain is loaded in.
        Can be an integer, specifying the
        chain index, or a str, specifying the chain name.
    figsize : float, optional
        Scale horizontal and vertical figure size.
    errorbar : bool, optional
        Whether to only plot an error bar, instead of the marginalised distribution.
    include_truth_chain : bool, optional
        If you specify another chain as the truth chain, determine if it should still
        be plotted.
    blind : bool|string|list[string], optional
        Whether to blind axes values. Can be set to `True` to blind all parameters,
        or can pass in a string (or list of strings) which specify the parameters to blind.
    watermark : str, optional
        A watermark to add to the figure
    extra_parameter_spacing : float, optional
        Increase horizontal space for parameter values
    vertical_spacing_ratio : float, optional
        Increase vertical space for each model
    show_names : bool, optional
        Whether to show chain names or not. Defaults to `True`.

    Returns
    -------
    figure
        the matplotlib figure created
    """
    # Full marginalised histograms need wider extents than bare error bars.
    wide_extents = not errorbar
    chains, parameters, truth, extents, blind = self._sanitise(chains, parameters, truth, extents, blind=blind, wide_extents=wide_extents)
    all_names = [c.name for c in self.parent.chains]
    # Check if we're using a chain for truth values
    if isinstance(truth, str):
        assert truth in all_names, "Truth chain %s is not in the list of added chains %s" % (truth, all_names)
        if not include_truth_chain:
            chains = [c for c in chains if c.name != truth]
        truth = self.parent.analysis.get_summary(chains=truth, parameters=parameters)
    # Size each column by the widest rendered label text.
    max_param = self._get_size_of_texts(parameters)
    fid_dpi = 65  # Seriously I have no idea what value this should be
    param_width = extra_parameter_spacing + max(0.5, max_param / fid_dpi)
    if show_names:
        max_model_name = self._get_size_of_texts([chain.name for chain in chains])
        model_width = 0.25 + (max_model_name / fid_dpi)
        gridspec_kw = {'width_ratios': [model_width] + [param_width] * len(parameters), 'height_ratios': [1] * len(chains)}
        ncols = 1 + len(parameters)
    else:
        model_width = 0
        gridspec_kw = {'width_ratios': [param_width] * len(parameters), 'height_ratios': [1] * len(chains)}
        ncols = len(parameters)
    top_spacing = 0.3
    bottom_spacing = 0.2
    row_height = (0.5 if not errorbar else 0.3) * vertical_spacing_ratio
    width = param_width * len(parameters) + model_width
    height = top_spacing + bottom_spacing + row_height * len(chains)
    top_ratio = 1 - (top_spacing / height)
    bottom_ratio = bottom_spacing / height
    # `figsize` acts as a scale factor on the computed dimensions.
    figsize = (width * figsize, height * figsize)
    fig, axes = plt.subplots(nrows=len(chains), ncols=ncols, figsize=figsize, squeeze=False, gridspec_kw=gridspec_kw)
    fig.subplots_adjust(left=0.05, right=0.95, top=top_ratio, bottom=bottom_ratio, wspace=0.0, hspace=0.0)
    label_font_size = self.parent.config["label_font_size"]
    legend_color_text = self.parent.config["legend_color_text"]
    # Track the tallest histogram per parameter so columns share a y scale.
    max_vals = {}
    for i, row in enumerate(axes):
        chain = chains[i]
        cs, ws, ps, = chain.chain, chain.weights, chain.parameters
        gs, ns = chain.grid, chain.name
        colour = chain.config["color"]
        # First one put name of model
        if show_names:
            ax_first = row[0]
            ax_first.set_axis_off()
            text_colour = "k" if not legend_color_text else colour
            ax_first.text(0, 0.5, ns, transform=ax_first.transAxes, fontsize=label_font_size, verticalalignment="center", color=text_colour, weight="medium")
            cols = row[1:]
        else:
            cols = row
        for ax, p in zip(cols, parameters):
            # Set up the frames
            if i > 0:
                ax.spines['top'].set_visible(False)
            if i < (len(chains) - 1):
                ax.spines['bottom'].set_visible(False)
            if i < (len(chains) - 1) or p in blind:
                ax.set_xticks([])
            ax.set_yticks([])
            ax.set_xlim(extents[p])
            # Put title in
            if i == 0:
                ax.set_title(r"$%s$" % p, fontsize=label_font_size)
            # Add truth values
            truth_value = truth.get(p)
            if truth_value is not None:
                if isinstance(truth_value, float) or isinstance(truth_value, int):
                    truth_mean = truth_value
                    truth_min, truth_max = None, None
                else:
                    if len(truth_value) == 1:
                        # NOTE(review): this leaves truth_mean as the one-element
                        # sequence itself, which is then passed to axvline —
                        # presumably intended to be truth_value[0]; confirm.
                        truth_mean = truth_value
                        truth_min, truth_max = None, None
                    elif len(truth_value) == 2:
                        truth_min, truth_max = truth_value
                        truth_mean = None
                    else:
                        truth_min, truth_mean, truth_max = truth_value
                if truth_mean is not None:
                    ax.axvline(truth_mean, **self.parent.config_truth)
                if truth_min is not None and truth_max is not None:
                    ax.axvspan(truth_min, truth_max, color=self.parent.config_truth["color"], alpha=0.15, lw=0)
            # Skip if this chain doesn't have the parameter
            if p not in ps:
                continue
            # Plot the good stuff
            if errorbar:
                fv = self.parent.analysis.get_parameter_summary(chain, p)
                if fv[0] is not None and fv[2] is not None:
                    diff = np.abs(np.diff(fv))
                    ax.errorbar([fv[1]], 0, xerr=[[diff[0]], [diff[1]]], fmt='o', color=colour)
            else:
                m = self._plot_bars(ax, p, chain)
                if max_vals.get(p) is None or m > max_vals.get(p):
                    max_vals[p] = m
    # Second pass: apply the shared per-parameter y limits.
    for i, row in enumerate(axes):
        index = 1 if show_names else 0
        for ax, p in zip(row[index:], parameters):
            if not errorbar:
                ax.set_ylim(0, 1.1 * max_vals[p])
    dpi = 300
    if watermark:
        ax = None
        self._add_watermark(fig, ax, figsize, watermark, dpi=dpi, size_scale=0.8)
    if filename is not None:
        if isinstance(filename, str):
            filename = [filename]
        for f in filename:
            self._save_fig(fig, f, dpi)
    if display:
        plt.show()
    return fig
|
Plots parameter summaries
This plot is more for a sanity or consistency check than for use with final results.
Plotting this before plotting with :func:`plot` allows you to quickly see if the
chains give well behaved distributions, or if certain parameters are suspect
or require a greater burn in period.
Parameters
----------
parameters : list[str]|int, optional
Specify a subset of parameters to plot. If not set, all parameters are plotted.
If an integer is given, only the first so many parameters are plotted.
truth : list[float]|list|list[float]|dict[str]|str, optional
A list of truth values corresponding to parameters, or a dictionary of
truth values keyed by the parameter. Each "truth value" can be either a float (will
draw a vertical line), two floats (a shaded interval) or three floats (min, mean, max),
which renders as a shaded interval with a line for the mean. Or, supply a string
which matches a chain name, and the results for that chain will be used as the 'truth'
extents : list[tuple]|dict[str], optional
A list of two-tuples for plot extents per parameter, or a dictionary of
extents keyed by the parameter.
display : bool, optional
If set, shows the plot using ``plt.show()``
filename : str, optional
If set, saves the figure to the filename
chains : int|str, list[str|int], optional
Used to specify which chain to show if more than one chain is loaded in.
Can be an integer, specifying the
chain index, or a str, specifying the chain name.
figsize : float, optional
Scale horizontal and vertical figure size.
errorbar : bool, optional
Whether to only plot an error bar, instead of the marginalised distribution.
include_truth_chain : bool, optional
If you specify another chain as the truth chain, determine if it should still
be plotted.
blind : bool|string|list[string], optional
Whether to blind axes values. Can be set to `True` to blind all parameters,
or can pass in a string (or list of strings) which specify the parameters to blind.
watermark : str, optional
A watermark to add to the figure
extra_parameter_spacing : float, optional
Increase horizontal space for parameter values
vertical_spacing_ratio : float, optional
Increase vertical space for each model
show_names : bool, optional
Whether to show chain names or not. Defaults to `True`.
Returns
-------
figure
the matplotlib figure created
|
entailment
|
def gelman_rubin(self, chain=None, threshold=0.05):
    r""" Runs the Gelman-Rubin diagnostic on the supplied chains.

    Parameters
    ----------
    chain : int|str, optional
        Which chain to run the diagnostic on. By default, this is `None`,
        which will run the diagnostic on all chains. You can also
        supply an integer (the chain index) or a string, for the chain
        name (if you set one).
    threshold : float, optional
        The maximum deviation permitted from 1 for the final value
        :math:`\hat{R}`

    Returns
    -------
    bool
        whether or not the chains pass the test

    Notes
    -----
    I follow PyMC in calculating the Gelman-Rubin statistic, where,
    having :math:`m` chains of length :math:`n`, we compute

    .. math::

        B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2

        W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right]

    where :math:`\theta` represents each model parameter. We then compute
    :math:`\hat{V} = \frac{n-1}{n}W + \frac{1}{n}B`, and have our convergence ratio
    :math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters,
    this ratio deviates from unity by less than the supplied threshold.
    """
    if chain is None:
        # Run the diagnostic on every loaded chain and require all to pass.
        return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))])

    index = self.parent._get_chain(chain)
    assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
    chain = self.parent.chains[index[0]]

    num_walkers = chain.walkers
    parameters = chain.parameters
    name = chain.name
    data = chain.chain
    # Each walker's contiguous block of steps is treated as its own sub-chain.
    chains = np.split(data, num_walkers)
    assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker"
    m = 1.0 * len(chains)
    n = 1.0 * chains[0].shape[0]
    all_mean = np.mean(data, axis=0)
    chain_means = np.array([np.mean(c, axis=0) for c in chains])
    chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains])
    # Between-chain variance (b) and mean within-chain variance (w).
    b = n / (m - 1) * ((chain_means - all_mean) ** 2).sum(axis=0)
    w = (1 / m) * chain_var.sum(axis=0)
    # Pooled variance estimate plus the sampling-variability correction term.
    var = (n - 1) * w / n + b / n
    v = var + b / (n * m)
    R = np.sqrt(v / w)
    passed = np.abs(R - 1) < threshold
    print("Gelman-Rubin Statistic values for chain %s" % name)
    # Renamed the loop variable from `v` so it no longer shadows the statistic above.
    for p, rval, pas in zip(parameters, R, passed):
        param = "Param %d" % p if isinstance(p, int) else p
        print("%s: %7.5f (%s)" % (param, rval, "Passed" if pas else "Failed"))
    return np.all(passed)
|
r""" Runs the Gelman Rubin diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
supply an integer (the chain index) or a string, for the chain
name (if you set one).
threshold : float, optional
The maximum deviation permitted from 1 for the final value
:math:`\hat{R}`
Returns
-------
float
whether or not the chains pass the test
Notes
-----
I follow PyMC in calculating the Gelman-Rubin statistic, where,
having :math:`m` chains of length :math:`n`, we compute
.. math::
B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2
W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right]
where :math:`\theta` represents each model parameter. We then compute
:math:`\hat{V} = \frac{n-1}{n}W + \frac{1}{n}B`, and have our convergence ratio
:math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters,
this ratio deviates from unity by less than the supplied threshold.
|
entailment
|
def geweke(self, chain=None, first=0.1, last=0.5, threshold=0.05):
    """ Runs the Geweke diagnostic on the supplied chains.

    Parameters
    ----------
    chain : int|str, optional
        Which chain to run the diagnostic on. By default, this is `None`,
        which will run the diagnostic on all chains. You can also
        supply an integer (the chain index) or a string, for the chain
        name (if you set one).
    first : float, optional
        The amount of the start of the chain to use
    last : float, optional
        The end amount of the chain to use
    threshold : float, optional
        The p-value to use when testing for normality.

    Returns
    -------
    bool
        whether or not the chains pass the test
    """
    if chain is None:
        # Fix: propagate `first` and `last` into the recursive per-chain calls
        # (previously they were silently dropped and the defaults used).
        return np.all([self.geweke(k, first=first, last=last, threshold=threshold)
                       for k in range(len(self.parent.chains))])

    index = self.parent._get_chain(chain)
    assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
    chain = self.parent.chains[index[0]]

    num_walkers = chain.walkers
    assert num_walkers is not None and num_walkers > 0, \
        "You need to specify the number of walkers to use the Geweke diagnostic."
    name = chain.name
    data = chain.chain
    chains = np.split(data, num_walkers)
    n = 1.0 * chains[0].shape[0]
    n_start = int(np.floor(first * n))
    n_end = int(np.floor((1 - last) * n))
    # Per-walker, per-parameter means and spectral-density variance estimates
    # over the leading and trailing segments of each sub-chain.
    mean_start = np.array([np.mean(c[:n_start, i])
                           for c in chains for i in range(c.shape[1])])
    var_start = np.array([self._spec(c[:n_start, i]) / c[:n_start, i].size
                          for c in chains for i in range(c.shape[1])])
    mean_end = np.array([np.mean(c[n_end:, i])
                         for c in chains for i in range(c.shape[1])])
    var_end = np.array([self._spec(c[n_end:, i]) / c[n_end:, i].size
                        for c in chains for i in range(c.shape[1])])
    zs = (mean_start - mean_end) / (np.sqrt(var_start + var_end))
    _, pvalue = normaltest(zs)
    # Fix: message previously read "Gweke".
    print("Geweke Statistic for chain %s has p-value %e" % (name, pvalue))
    return pvalue > threshold
|
Runs the Geweke diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
supply an integer (the chain index) or a string, for the chain
name (if you set one).
first : float, optional
The amount of the start of the chain to use
last : float, optional
The end amount of the chain to use
threshold : float, optional
The p-value to use when testing for normality.
Returns
-------
float
whether or not the chains pass the test
|
entailment
|
def get_latex_table(self, parameters=None, transpose=False, caption=None,
                    label="tab:model_params", hlines=True, blank_fill="--"):  # pragma: no cover
    """ Generates a LaTeX table from parameter summaries.

    Parameters
    ----------
    parameters : list[str], optional
        A list of what parameters to include in the table. By default, includes all parameters
    transpose : bool, optional
        Defaults to False, which gives each column as a parameter, each chain (framework)
        as a row. You can swap it so that you have a parameter each row and a framework
        each column by setting this to True
    caption : str, optional
        If you want to generate a caption for the table through Python, use this.
        Defaults to an empty string
    label : str, optional
        If you want to generate a label for the table through Python, use this.
        Defaults to an empty string
    hlines : bool, optional
        Inserts ``\\hline`` before and after the header, and at the end of table.
    blank_fill : str, optional
        If a framework does not have a particular parameter, will fill that cell of
        the table with this string.

    Returns
    -------
    str
        the LaTeX table.
    """
    if parameters is None:
        parameters = self.parent._all_parameters
    # Integer (unnamed) parameters cannot be rendered as column headers.
    for p in parameters:
        assert isinstance(p, str), \
            "Generating a LaTeX table requires all parameters have labels"
    num_parameters = len(parameters)
    num_chains = len(self.parent.chains)
    # One summary dict per chain (squeeze=False guarantees a list).
    fit_values = self.get_summary(squeeze=False)
    if label is None:
        label = ""
    if caption is None:
        caption = ""
    end_text = " \\\\ \n"
    # Column spec: one centred column per header cell.
    if transpose:
        column_text = "c" * (num_chains + 1)
    else:
        column_text = "c" * (num_parameters + 1)
    center_text = ""
    hline_text = "\\hline\n"
    if hlines:
        center_text += hline_text + "\t\t"
    if transpose:
        # Rows are parameters, columns are chains.
        center_text += " & ".join(["Parameter"] + [c.name for c in self.parent.chains]) + end_text
        if hlines:
            center_text += "\t\t" + hline_text
        for p in parameters:
            arr = ["\t\t" + p]
            for chain_res in fit_values:
                if p in chain_res:
                    # chain_res[p] is (lower, maximum, upper); wrap=True adds $...$.
                    arr.append(self.get_parameter_text(*chain_res[p], wrap=True))
                else:
                    arr.append(blank_fill)
            center_text += " & ".join(arr) + end_text
    else:
        # Rows are chains, columns are parameters.
        center_text += " & ".join(["Model"] + parameters) + end_text
        if hlines:
            center_text += "\t\t" + hline_text
        for name, chain_res in zip([c.name for c in self.parent.chains], fit_values):
            arr = ["\t\t" + name]
            for p in parameters:
                if p in chain_res:
                    arr.append(self.get_parameter_text(*chain_res[p], wrap=True))
                else:
                    arr.append(blank_fill)
            center_text += " & ".join(arr) + end_text
    if hlines:
        center_text += "\t\t" + hline_text
    # Splice the column spec and body into the shared table template.
    final_text = get_latex_table_frame(caption, label) % (column_text, center_text)
    return final_text
|
Generates a LaTeX table from parameter summaries.
Parameters
----------
parameters : list[str], optional
A list of what parameters to include in the table. By default, includes all parameters
transpose : bool, optional
Defaults to False, which gives each column as a parameter, each chain (framework)
as a row. You can swap it so that you have a parameter each row and a framework
each column by setting this to True
caption : str, optional
If you want to generate a caption for the table through Python, use this.
Defaults to an empty string
label : str, optional
If you want to generate a label for the table through Python, use this.
Defaults to an empty string
hlines : bool, optional
Inserts ``\\hline`` before and after the header, and at the end of table.
blank_fill : str, optional
If a framework does not have a particular parameter, will fill that cell of
the table with this string.
Returns
-------
str
the LaTeX table.
|
entailment
|
def get_summary(self, squeeze=True, parameters=None, chains=None):
    """ Return marginalised parameter summaries for the selected chains.

    Parameters
    ----------
    squeeze : bool, optional
        If True (the default) and exactly one summary is produced, return
        it directly rather than as a one-element list.
    parameters : list[str], optional
        Restrict the summaries to these parameters. Defaults to every
        parameter of each chain.
    chains : list[int|str], optional
        The chains to summarise, by index or name. Defaults to all chains.

    Returns
    -------
    dict or list of dict
        Parameter bounds keyed by parameter name, one dict per chain.
    """
    if chains is None:
        selected = self.parent.chains
    else:
        if isinstance(chains, (int, str)):
            chains = [chains]
        # Resolve indices/names to actual chain objects.
        selected = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]

    summaries = []
    for target in selected:
        wanted = target.parameters if parameters is None else parameters
        # Silently skip requested parameters the chain does not contain.
        bounds = {p: self.get_parameter_summary(target, p)
                  for p in wanted if p in target.parameters}
        summaries.append(bounds)

    if squeeze and len(summaries) == 1:
        return summaries[0]
    return summaries
|
Gets a summary of the marginalised parameter distributions.
Parameters
----------
squeeze : bool, optional
Squeeze the summaries. If you only have one chain, squeeze will not return
a length one list, just the single summary. If this is false, you will
get a length one list.
parameters : list[str], optional
A list of parameters which to generate summaries for.
chains : list[int|str], optional
A list of the chains to get a summary of.
Returns
-------
list of dictionaries
One entry per chain, parameter bounds stored in dictionary with parameter as key
|
entailment
|
def get_max_posteriors(self, parameters=None, squeeze=True, chains=None):
    """ Return the maximum-posterior point for each selected chain.

    Requires the chains to have been loaded with `posterior` values;
    chains without them contribute `None`.

    Parameters
    ----------
    parameters : str|list[str]
        The parameter (or parameters) to report at the maximum.
    squeeze : bool, optional
        If True (the default) and only one result is produced, return it
        directly rather than as a one-element list.
    chains : list[int|str], optional
        The chains to query, by index or name. Defaults to all chains.

    Returns
    -------
    dict, None, or list of those
        Per chain: a dict of parameter values at the posterior maximum,
        or `None` if the chain has no posterior information.
    """
    if chains is None:
        selected = self.parent.chains
    else:
        if isinstance(chains, (int, str)):
            chains = [chains]
        selected = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]

    # Allow a bare string for a single parameter.
    if isinstance(parameters, str):
        parameters = [parameters]

    results = []
    for target in selected:
        if target.posterior_max_index is None:
            # No posterior recorded for this chain.
            results.append(None)
            continue
        wanted = target.parameters if parameters is None else parameters
        results.append({p: target.posterior_max_params[p]
                        for p in wanted if p in target.parameters})

    if squeeze and len(results) == 1:
        return results[0]
    return results
|
Gets the maximum posterior point in parameter space from the passed parameters.
Requires the chains to have set `posterior` values.
Parameters
----------
parameters : str|list[str]
The parameters to find
squeeze : bool, optional
Squeeze the summaries. If you only have one chain, squeeze will not return
a length one list, just the single summary. If this is false, you will
get a length one list.
chains : list[int|str], optional
A list of the chains to get a summary of.
Returns
-------
list of two-tuples
One entry per chain, two-tuple represents the max-likelihood coordinate
|
entailment
|
def get_correlations(self, chain=0, parameters=None):
    """ Compute the correlation matrix between parameters of one chain.

    Parameters
    ----------
    chain : int|str, optional
        The chain index or name. Defaults to the first chain.
    parameters : list[str], optional
        The parameters to correlate. Defaults to all parameters of the chain.

    Returns
    -------
    tuple
        (parameter names, 2D correlation matrix)
    """
    params, cov = self.get_covariance(chain=chain, parameters=parameters)
    # Normalise the covariance by the outer product of the standard deviations.
    sigmas = np.sqrt(np.diag(cov))
    return params, cov / np.outer(sigmas, sigmas)
|
Takes a chain and returns the correlation between chain parameters.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
Returns
-------
tuple
The first index giving a list of parameter names, the second index being the
2D correlation matrix.
|
entailment
|
def get_covariance(self, chain=0, parameters=None):
    """ Compute the covariance matrix between parameters of one chain.

    Parameters
    ----------
    chain : int|str, optional
        The chain index or name. Defaults to the first chain.
    parameters : list[str], optional
        The parameters to include. Defaults to all parameters of the chain.

    Returns
    -------
    tuple
        (parameter names, 2D covariance matrix)
    """
    indices = self.parent._get_chain(chain)
    assert len(indices) == 1, "Please specify only one chain, have %d chains" % len(indices)
    target = self.parent.chains[indices[0]]

    if parameters is None:
        parameters = target.parameters

    samples = target.get_data(parameters)
    # Weighted covariance over samples (rows are observations); atleast_2d
    # keeps a single-parameter result as a 1x1 matrix.
    covariance = np.atleast_2d(np.cov(samples, aweights=target.weights, rowvar=False))
    return parameters, covariance
|
Takes a chain and returns the covariance between chain parameters.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
Returns
-------
tuple
The first index giving a list of parameter names, the second index being the
2D covariance matrix.
|
entailment
|
def get_correlation_table(self, chain=0, parameters=None, caption="Parameter Correlations",
                          label="tab:parameter_correlations"):
    """ Render the parameter correlation matrix of one chain as a LaTeX table.

    Parameters
    ----------
    chain : int|str, optional
        The chain index or name. Defaults to the first chain.
    parameters : list[str], optional
        The parameters to correlate. Defaults to all parameters of the chain.
    caption : str, optional
        The LaTeX table caption.
    label : str, optional
        The LaTeX table label.

    Returns
    -------
    str
        The LaTeX table ready to go!
    """
    params, correlations = self.get_correlations(chain=chain, parameters=parameters)
    return self._get_2d_latex_table(params, correlations, caption, label)
|
Gets a LaTeX table of parameter correlations.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
caption : str, optional
The LaTeX table caption.
label : str, optional
The LaTeX table label.
Returns
-------
str
The LaTeX table ready to go!
|
entailment
|
def get_covariance_table(self, chain=0, parameters=None, caption="Parameter Covariance",
                         label="tab:parameter_covariance"):
    """ Render the parameter covariance matrix of one chain as a LaTeX table.

    Parameters
    ----------
    chain : int|str, optional
        The chain index or name. Defaults to the first chain.
    parameters : list[str], optional
        The parameters to include. Defaults to all parameters of the chain.
    caption : str, optional
        The LaTeX table caption.
    label : str, optional
        The LaTeX table label.

    Returns
    -------
    str
        The LaTeX table ready to go!
    """
    params, covariance = self.get_covariance(chain=chain, parameters=parameters)
    return self._get_2d_latex_table(params, covariance, caption, label)
|
Gets a LaTeX table of parameter covariance.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
caption : str, optional
The LaTeX table caption.
label : str, optional
The LaTeX table label.
Returns
-------
str
The LaTeX table ready to go!
|
entailment
|
def get_parameter_text(self, lower, maximum, upper, wrap=False):
    """ Format marginalised parameter bounds as LaTeX text.

    Parameters
    ----------
    lower : float
        The lower bound on the parameter
    maximum : float
        The value of the parameter with maximum probability
    upper : float
        The upper bound on the parameter
    wrap : bool
        Wrap output text in dollar signs for LaTeX

    Returns
    -------
    str
        The formatted text given the parameter bounds
    """
    if lower is None or upper is None:
        return ""
    err_hi = upper - maximum
    err_lo = maximum - lower

    # Decimal resolution is set by the smaller error's order of magnitude
    # (falling back to the maximum itself when both errors vanish).
    if err_hi != 0 and err_lo != 0:
        res = min(np.floor(np.log10(np.abs(err_hi))),
                  np.floor(np.log10(np.abs(err_lo))))
    elif err_hi == 0 and err_lo != 0:
        res = np.floor(np.log10(np.abs(err_lo)))
    elif err_hi != 0 and err_lo == 0:
        res = np.floor(np.log10(np.abs(err_hi)))
    else:
        res = np.floor(np.log10(np.abs(maximum)))

    # Defaults; the branches below (mutually exclusive values of res)
    # adjust the format string, rounding digits and power-of-ten scaling.
    scale_exp, fmt, ndp = 0, "%0.1f", 1
    if np.abs(res) > 2:
        scale_exp = -res
    elif res == 2:
        fmt, scale_exp, ndp = "%0.0f", -1, 0
    elif res == 1:
        fmt = "%0.0f"
    elif res == -1:
        fmt, ndp = "%0.2f", 2
    elif res == -2:
        fmt, ndp = "%0.3f", 3

    err_hi = round(err_hi * 10 ** scale_exp, ndp)
    err_lo = round(err_lo * 10 ** scale_exp, ndp)
    maximum = round(maximum * 10 ** scale_exp, ndp)
    if maximum == -0.0:
        maximum = 0.0
    if res == 2:
        # Avoid a x10^1 prefactor: undo the scaling for this special case.
        err_hi *= 10 ** -scale_exp
        err_lo *= 10 ** -scale_exp
        maximum *= 10 ** -scale_exp
        scale_exp = 0
        fmt = "%0.0f"

    hi_text = fmt % err_hi
    lo_text = fmt % err_lo
    max_text = fmt % maximum
    if hi_text == lo_text:
        text = r"%s\pm %s" % (max_text, lo_text)
    else:
        text = r"%s^{+%s}_{-%s}" % (max_text, hi_text, lo_text)
    if scale_exp != 0:
        text = r"\left( %s \right) \times 10^{%d}" % (text, -scale_exp)
    if wrap:
        text = "$%s$" % text
    return text
|
Generates LaTeX appropriate text from marginalised parameter bounds.
Parameters
----------
lower : float
The lower bound on the parameter
maximum : float
The value of the parameter with maximum probability
upper : float
The upper bound on the parameter
wrap : bool
Wrap output text in dollar signs for LaTeX
Returns
-------
str
The formatted text given the parameter bounds
|
entailment
|
def add_chain(self, chain, parameters=None, name=None, weights=None, posterior=None, walkers=None,
              grid=False, num_eff_data_points=None, num_free_params=None, color=None, linewidth=None,
              linestyle=None, kde=None, shade=None, shade_alpha=None, power=None, marker_style=None, marker_size=None,
              marker_alpha=None, plot_contour=None, plot_point=None, statistics=None, cloud=None,
              shade_gradient=None, bar_shade=None, bins=None, smooth=None, color_params=None,
              plot_color_params=None, cmap=None, num_cloud=None):
    r"""
    Add a chain to the consumer.

    Parameters
    ----------
    chain : str|ndarray|dict
        The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
        interprets the string as a filename and attempts to load it in. If a ``dict``
        is passed in, it assumes the dict has keys of parameter names and values of
        an array of samples. Notice that using a dictionary puts the order of
        parameters in the output under the control of the python ``dict.keys()`` function.
        If you passed ``grid`` is set, you can pass in the parameter ranges in list form.
    parameters : list[str], optional
        A list of parameter names, one for each column (dimension) in the chain. This parameter
        should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
        are taken from the dictionary keys.
    name : str, optional
        The name of the chain. Used when plotting multiple chains at once.
    weights : ndarray, optional
        If given, uses this array to weight the samples in chain
    posterior : ndarray, optional
        If given, records the log posterior for each sample in the chain
    walkers : int, optional
        How many walkers went into creating the chain. Each walker should
        contribute the same number of steps, and should appear in contiguous
        blocks in the final chain.
    grid : boolean, optional
        Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
        chains. Note that when this is set, `walkers` should not be set, and `weights` should
        be set to the posterior evaluation for the grid point. **Be careful** when using
        a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
        surface and give unreasonably large parameter bounds.
    num_eff_data_points : int|float, optional
        The number of effective (independent) data points used in the model fitting. Not required
        for plotting, but required if loading in multiple chains to perform model comparison.
    num_free_params : int, optional
        The number of degrees of freedom in your model. Not required for plotting, but required if
        loading in multiple chains to perform model comparison.
    color : str(hex), optional
        Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
    linewidth : float, optional
        Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
    linestyle : str, optional
        Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
    kde : bool|float, optional
        Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
    shade : bool, optional
        If set, overrides the default behaviour and plots filled contours or not. If a list of
        bools is passed, you can turn shading on or off for specific chains.
    shade_alpha : float, optional
        Filled contour alpha value. Can be used instead of calling `configure` for convenience.
    power : float, optional
        The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
    marker_style : str, optional
        The marker style to use when plotting points. Defaults to `'.'`
    marker_size : numeric, optional
        Size of markers, if plotted. Defaults to `4`.
    marker_alpha : numeric, optional
        The alpha values when plotting markers.
    plot_contour : bool, optional
        Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
        25 concurrent chains.
    plot_point : bool, optional
        Whether to plot a maximum likelihood point. Defaults to true for more than 24 chains.
    statistics : string, optional
        Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
        statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
        `"max_closest"` and `"max_central"`. In the
        very, very rare case you want to enable different statistics for different
        chains, you can pass in a list of strings.
    cloud : bool, optional
        If set, overrides the default behaviour and plots the cloud or not
    shade_gradient : float, optional
        Passed through to this chain's configuration; see `configure` for details.
    bar_shade : bool, optional
        If set to true, shades in confidence regions in under histogram. By default
        this happens if you less than 3 chains, but is disabled if you are comparing
        more chains. You can pass a list if you wish to shade some chains but not others.
    bins : int|float, optional
        The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
        :math:`n` are the number of data points. Giving an integer will set the number
        of bins to the given value. Giving a float will scale the number of bins, such
        that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
        Note this parameter is most useful if `kde=False` is also passed, so you
        can actually see the bins and not a KDE.
    smooth : int, optional
        Smoothing applied to this chain's surfaces; passed through to the chain
        configuration (see `configure`).
    color_params : str, optional
        The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
        to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
        it will respectively use the weights, log weights, or posterior, to colour the points.
    plot_color_params : bool, optional
        Whether or not the colour parameter should also be plotted as a posterior surface.
    cmap : str, optional
        The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
        specific a different cmap for each variable. By default ChainConsumer will cycle between several
        cmaps.
    num_cloud : int, optional
        The number of scatter points to show when enabling `cloud` or setting one of the parameters
        to colour scatter. Defaults to 15k per chain.

    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    is_dict = False
    assert chain is not None, "You cannot have a chain of None"
    # Normalise the three accepted input forms (filename, dict, list) to an ndarray.
    if isinstance(chain, str):
        if chain.endswith("txt"):
            chain = np.loadtxt(chain)
        else:
            chain = np.load(chain)
    elif isinstance(chain, dict):
        assert parameters is None, \
            "You cannot pass a dictionary and specify parameter names"
        is_dict = True
        parameters = list(chain.keys())
        chain = np.array([chain[p] for p in parameters]).T
    elif isinstance(chain, list):
        chain = np.array(chain).T
    if grid:
        assert walkers is None, "If grid is set, walkers should not be"
        assert weights is not None, "If grid is set, you need to supply weights"
        if len(weights.shape) > 1:
            # Multi-dimensional weights: the chain columns are the grid axes;
            # expand them into a flattened meshgrid of every grid point.
            assert not is_dict, "We cannot construct a meshgrid from a dictionary, as the parameters" \
                                "are no longer ordered. Please pass in a flattened array instead."
            self._logger.info("Constructing meshgrid for grid results")
            meshes = np.meshgrid(*[u for u in chain.T], indexing="ij")
            chain = np.vstack([m.flatten() for m in meshes]).T
            weights = weights.flatten()
            assert weights.size == chain[:, 0].size, "Error, given weight array size disagrees with parameter sampling"
    # A 1D chain is a single parameter: reshape to one column.
    if len(chain.shape) == 1:
        chain = chain[None].T
    if name is None:
        name = "Chain %d" % len(self.chains)
    if power is not None:
        assert isinstance(power, int) or isinstance(power, float), "Power should be numeric, but is %s" % type(
            power)
    # The first chain added with explicit parameters defines the defaults
    # used for later chains added without parameter names.
    if self._default_parameters is None and parameters is not None:
        self._default_parameters = parameters
    if parameters is None:
        if self._default_parameters is not None:
            assert chain.shape[1] == len(self._default_parameters), \
                "Chain has %d dimensions, but default parameters have %d dimensions" \
                % (chain.shape[1], len(self._default_parameters))
            parameters = self._default_parameters
            self._logger.debug("Adding chain using default parameters")
        else:
            self._logger.debug("Adding chain with no parameter names")
            parameters = ["%d" % x for x in range(chain.shape[1])]
    else:
        self._logger.debug("Adding chain with defined parameters")
        assert len(parameters) <= chain.shape[1], \
            "Have only %d columns in chain, but have been given %d parameters names! " \
            "Please double check this." % (chain.shape[1], len(parameters))
    # Track the ordered union of parameters across all chains.
    for p in parameters:
        if p not in self._all_parameters:
            self._all_parameters.append(p)

    # Sorry, no KDE for you on a grid.
    if grid:
        kde = None
    if color is not None:
        color = self.color_finder.get_formatted([color])[0]

    c = Chain(chain, parameters, name, weights=weights, posterior=posterior, walkers=walkers,
              grid=grid, num_free_params=num_free_params, num_eff_data_points=num_eff_data_points,
              color=color, linewidth=linewidth, linestyle=linestyle, kde=kde, shade_alpha=shade_alpha, power=power,
              marker_style=marker_style, marker_size=marker_size, marker_alpha=marker_alpha,
              plot_contour=plot_contour, plot_point=plot_point, statistics=statistics, cloud=cloud,
              shade=shade, shade_gradient=shade_gradient, bar_shade=bar_shade, bins=bins, smooth=smooth,
              color_params=color_params, plot_color_params=plot_color_params, cmap=cmap,
              num_cloud=num_cloud)
    self.chains.append(c)
    # Configuration depends on the chain list, so it must be recomputed.
    self._init_params()
    return self
|
Add a chain to the consumer.
Parameters
----------
chain : str|ndarray|dict
The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
interprets the string as a filename and attempts to load it in. If a ``dict``
is passed in, it assumes the dict has keys of parameter names and values of
an array of samples. Notice that using a dictionary puts the order of
parameters in the output under the control of the python ``dict.keys()`` function.
If you passed ``grid`` is set, you can pass in the parameter ranges in list form.
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the chain. This parameter
should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
are taken from the dictionary keys.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
weights : ndarray, optional
If given, uses this array to weight the samples in chain
posterior : ndarray, optional
If given, records the log posterior for each sample in the chain
walkers : int, optional
How many walkers went into creating the chain. Each walker should
contribute the same number of steps, and should appear in contiguous
blocks in the final chain.
grid : boolean, optional
Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
chains. Note that when this is set, `walkers` should not be set, and `weights` should
be set to the posterior evaluation for the grid point. **Be careful** when using
a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
surface and give unreasonably large parameter bounds.
num_eff_data_points : int|float, optional
The number of effective (independent) data points used in the model fitting. Not required
for plotting, but required if loading in multiple chains to perform model comparison.
num_free_params : int, optional
The number of degrees of freedom in your model. Not required for plotting, but required if
loading in multiple chains to perform model comparison.
color : str(hex), optional
Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
linewidth : float, optional
Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
linestyle : str, optional
Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
kde : bool|float, optional
Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
shade : bool, optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float, optional
Filled contour alpha value. Can be used instead of calling `configure` for convenience.
power : float, optional
The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
marker_style : str|, optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric|, optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric, optional
The alpha values when plotting markers.
plot_contour : bool, optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool, optional
Whether to plot a maximum likelihood point. Defaults to true for more than 24 chains.
statistics : string, optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
cloud : bool, optional
If set, overrides the default behaviour and plots the cloud or not shade_gradient :
bar_shade : bool, optional
If set to true, shades in confidence regions in under histogram. By default
this happens if you less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
bins : int|float, optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE. smooth :
color_params : str, optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool, optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str, optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specific a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
num_cloud : int, optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
|
entailment
|
def remove_chain(self, chain=-1):
    """
    Removes one or more chains from ChainConsumer.

    Note that any configuration previously applied will need to be redone
    after calling this method.

    Parameters
    ----------
    chain : int|str, list[str|int]
        The chain(s) to remove, given either as chain index or chain name.
        Defaults to the most recently added chain.

    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    targets = [chain] if isinstance(chain, (str, int)) else chain
    # Resolve names/indices, then delete from the highest index down so that
    # earlier deletions do not shift the positions of later ones.
    indices = sorted((i for t in targets for i in self._get_chain(t)), reverse=True)
    assert len(indices) == len(set(indices)), "Error, you are trying to remove a chain more than once."
    for idx in indices:
        del self.chains[idx]
    # Rebuild the ordered, de-duplicated union of all remaining parameters.
    encountered = set()
    self._all_parameters = [p for c in self.chains for p in c.parameters
                            if not (p in encountered or encountered.add(p))]
    # Need to reconfigure
    self._init_params()
    return self
|
Removes a chain from ChainConsumer. Calling this will require any configurations set to be redone!
Parameters
----------
chain : int|str, list[str|int]
The chain(s) to remove. You can pass in either the chain index, or the chain name, to remove it.
By default removes the last chain added.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
|
entailment
|
def configure(self, statistics="max", max_ticks=5, plot_hists=True, flip=True,
              serif=True, sigma2d=False, sigmas=None, summary=None, bins=None, rainbow=None,
              colors=None, linestyles=None, linewidths=None, kde=False, smooth=None,
              cloud=None, shade=None, shade_alpha=None, shade_gradient=None, bar_shade=None,
              num_cloud=None, color_params=None, plot_color_params=False, cmaps=None,
              plot_contour=None, plot_point=None, global_point=True, marker_style=None, marker_size=None, marker_alpha=None,
              usetex=True, diagonal_tick_labels=True, label_font_size=12, tick_font_size=10,
              spacing=None, contour_labels=None, contour_label_font_size=10,
              legend_kwargs=None, legend_location=None, legend_artists=None,
              legend_color_text=True, watermark_text_kwargs=None, summary_area=0.6827):  # pragma: no cover
    r""" Configure the general plotting parameters common across the bar
    and contour plots.
    If you do not call this explicitly, the :func:`plot`
    method will invoke this method automatically.
    Please ensure that you call this method *after* adding all the relevant data to the
    chain consumer, as the consume changes configuration values depending on
    the supplied data.
    Parameters
    ----------
    statistics : string|list[str], optional
        Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
        statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
        `"max_closest"` and `"max_central"`. In the
        very, very rare case you want to enable different statistics for different
        chains, you can pass in a list of strings.
    max_ticks : int, optional
        The maximum number of ticks to use on the plots
    plot_hists : bool, optional
        Whether to plot marginalised distributions or not
    flip : bool, optional
        Set to false if, when plotting only two parameters, you do not want it to
        rotate the histogram so that it is horizontal.
    sigma2d: bool, optional
        Defaults to `False`. When `False`, uses :math:`\sigma` levels for 1D Gaussians - ie confidence
        levels of 68% and 95%. When `True`, uses the confidence levels for 2D Gaussians, where 1 and 2
        :math:`\sigma` represents 39% and 86% confidence levels respectively.
    sigmas : np.array, optional
        The :math:`\sigma` contour levels to plot. Defaults to [0, 1, 2, 3] for a single chain
        and [0, 1, 2] for multiple chains.
    serif : bool, optional
        Whether to display ticks and labels with serif font.
    summary : bool, optional
        If overridden, sets whether parameter summaries should be set as axis titles.
        Will not work if you have multiple chains
    bins : int|float,list[int|float], optional
        The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
        :math:`n` are the number of data points. Giving an integer will set the number
        of bins to the given value. Giving a float will scale the number of bins, such
        that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
        Note this parameter is most useful if `kde=False` is also passed, so you
        can actually see the bins and not a KDE.
    rainbow : bool|list[bool], optional
        Set to True to force use of rainbow colours
    colors : str(hex)|list[str(hex)], optional
        Provide a list of colours to use for each chain. If you provide more chains
        than colours, you *will* get the rainbow colour spectrum. If you only pass
        one colour, all chains are set to this colour. This probably won't look good.
    linestyles : str|list[str], optional
        Provide a list of line styles to plot the contours and marginalised
        distributions with. By default, this will become a list of solid lines. If a
        string is passed instead of a list, this style is used for all chains.
    linewidths : float|list[float], optional
        Provide a list of line widths to plot the contours and marginalised
        distributions with. By default, this is a width of 1. If a float
        is passed instead of a list, this width is used for all chains.
    kde : bool|float|list[bool|float], optional
        Whether to use a Gaussian KDE to smooth marginalised posteriors. If false, uses
        bins and linear interpolation, so ensure you have plenty of samples if your
        distribution is highly non-gaussian. Due to the slowness of performing a
        KDE on all data, it is often useful to disable this before producing final
        plots. If float, scales the width of the KDE bandpass manually.
    smooth : int|list[int], optional
        Defaults to 3. How much to smooth the marginalised distributions using a gaussian filter.
        If ``kde`` is set to true, this parameter is ignored. Setting it to either
        ``0``, ``False`` disables smoothing. For grid data, smoothing
        is set to 0 by default, not 3.
    cloud : bool|list[bool], optional
        If set, overrides the default behaviour and plots the cloud or not
    shade : bool|list[bool] optional
        If set, overrides the default behaviour and plots filled contours or not. If a list of
        bools is passed, you can turn shading on or off for specific chains.
    shade_alpha : float|list[float], optional
        Filled contour alpha value override. Default is 1.0. If a list is passed, you can set the
        shade opacity for specific chains.
    shade_gradient : float|list[float], optional
        How much to vary colours in different contour levels.
    bar_shade : bool|list[bool], optional
        If set to true, shades in confidence regions in under histogram. By default
        this happens if you less than 3 chains, but is disabled if you are comparing
        more chains. You can pass a list if you wish to shade some chains but not others.
    num_cloud : int|list[int], optional
        The number of scatter points to show when enabling `cloud` or setting one of the parameters
        to colour scatter. Defaults to 15k per chain.
    color_params : str|list[str], optional
        The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
        to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
        it will respectively use the weights, log weights, or posterior, to colour the points.
    plot_color_params : bool|list[bool], optional
        Whether or not the colour parameter should also be plotted as a posterior surface.
    cmaps : str|list[str], optional
        The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
        specific a different cmap for each variable. By default ChainConsumer will cycle between several
        cmaps.
    plot_contour : bool|list[bool], optional
        Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
        25 concurrent chains.
    plot_point : bool|list[bool], optional
        Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains.
    global_point : bool, optional
        Whether the point which gets plotted is the global posterior maximum, or the marginalised 2D
        posterior maximum. Note that when you use marginalised 2D maximums for the points, you do not
        get the 1D histograms. Defaults to `True`, for a global maximum value.
    marker_style : str|list[str], optional
        The marker style to use when plotting points. Defaults to `'.'`
    marker_size : numeric|list[numeric], optional
        Size of markers, if plotted. Defaults to `4`.
    marker_alpha : numeric|list[numeric], optional
        The alpha values when plotting markers.
    usetex : bool, optional
        Whether or not to parse text as LaTeX in plots.
    diagonal_tick_labels : bool, optional
        Whether to display tick labels on a 45 degree angle.
    label_font_size : int|float, optional
        The font size for plot axis labels and axis titles if summaries are configured to display.
    tick_font_size : int|float, optional
        The font size for the tick labels in the plots.
    spacing : float, optional
        The amount of spacing to add between plots. Defaults to `None`, which equates to 1.0 for less
        than 6 dimensions and 0.0 for higher dimensions.
    contour_labels : string, optional
        If unset do not plot contour labels. If set to "confidence", label the using confidence
        intervals. If set to "sigma", labels using sigma.
    contour_label_font_size : int|float, optional
        The font size for contour labels, if they are enabled.
    legend_kwargs : dict, optional
        Extra arguments to pass to the legend api.
    legend_location : tuple(int,int), optional
        Specifies the subplot in which to locate the legend. By default, this will be (0, -1),
        corresponding to the top right subplot if there are more than two parameters,
        and the bottom left plot for only two parameters with flip on.
        For having the legend in the primary subplot
        in the bottom left, set to (-1,0).
    legend_artists : bool, optional
        Whether to include hide artists in the legend. If all linestyles and line widths are identical,
        this will default to false (as only the colours change). Otherwise it will be true.
    legend_color_text : bool, optional
        Whether to colour the legend text.
    watermark_text_kwargs : dict, optional
        Options to pass to the fontdict property when generating text for the watermark.
    summary_area : float, optional
        The confidence interval used when generating parameter summaries. Defaults to 1 sigma, aka 0.6827
    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    # Dirty way of ensuring overrides happen when requested
    l = locals()
    explicit = []
    for k in l.keys():
        if l[k] is not None:
            explicit.append(k)
            if k.endswith("s"):
                explicit.append(k[:-1])
    self._init_params()
    num_chains = len(self.chains)
    assert rainbow is None or colors is None, \
        "You cannot both ask for rainbow colours and then give explicit colours"
    # Determine statistics
    assert statistics is not None, "statistics should be a string or list of strings!"
    if isinstance(statistics, str):
        assert statistics in list(Analysis.summaries), "statistics %s not recognised. Should be in %s" % (statistics, Analysis.summaries)
        statistics = [statistics.lower()] * len(self.chains)
    elif isinstance(statistics, list):
        # NOTE: loop variable renamed from `l` to avoid shadowing locals() above
        for i, stat in enumerate(statistics):
            statistics[i] = stat.lower()
    else:
        raise ValueError("statistics is not a string or a list!")
    # Determine KDEs
    if isinstance(kde, bool) or isinstance(kde, float):
        kde = [False if c.grid else kde for c in self.chains]
    kde_override = [c.kde for c in self.chains]
    kde = [c2 if c2 is not None else c1 for c1, c2 in zip(kde, kde_override)]
    # Determine bins
    if bins is None:
        bins = get_bins(self.chains)
    elif isinstance(bins, list):
        bins = [b2 if isinstance(b2, int) else np.floor(b2 * b1) for b1, b2 in zip(get_bins(self.chains), bins)]
    elif isinstance(bins, float):
        bins = [np.floor(b * bins) for b in get_bins(self.chains)]
    elif isinstance(bins, int):
        bins = [bins] * len(self.chains)
    else:
        raise ValueError("bins value is not a recognised class (float or int)")
    # Determine smoothing
    if smooth is None:
        smooth = [0 if c.grid or k else 3 for c, k in zip(self.chains, kde)]
    else:
        if smooth is not None and not smooth:
            smooth = 0
        if isinstance(smooth, list):
            smooth = [0 if k else s for s, k in zip(smooth, kde)]
        else:
            smooth = [0 if k else smooth for k in kde]
    # Determine color parameters
    if color_params is None:
        color_params = [None] * num_chains
    else:
        if isinstance(color_params, str):
            color_params = [
                color_params if color_params in cs.parameters + ["log_weights", "weights", "posterior"] else None
                for cs in self.chains]
            color_params = [None if c == "posterior" and self.chains[i].posterior is None else c for i, c in
                            enumerate(color_params)]
        elif isinstance(color_params, list) or isinstance(color_params, tuple):
            for c, chain in zip(color_params, self.chains):
                p = chain.parameters
                if c is not None:
                    assert c in p, "Color parameter %s not in parameters %s" % (c, p)
    # Determine if we should plot color parameters
    if isinstance(plot_color_params, bool):
        plot_color_params = [plot_color_params] * len(color_params)
    # Determine cmaps
    if cmaps is None:
        param_cmaps = {}
        cmaps = []
        i = 0
        for cp in color_params:
            if cp is None:
                cmaps.append(None)
            elif cp in param_cmaps:
                cmaps.append(param_cmaps[cp])
            else:
                param_cmaps[cp] = self._cmaps[i]
                cmaps.append(self._cmaps[i])
                i = (i + 1) % len(self._cmaps)
    # Determine colours
    if colors is None:
        if rainbow:
            colors = self.color_finder.get_colormap(num_chains)
        else:
            if num_chains > len(self._all_colours):
                num_needed_colours = np.sum([c is None for c in color_params])
                colour_list = self.color_finder.get_colormap(num_needed_colours)
            else:
                colour_list = self._all_colours
            colors = []
            ci = 0
            for c in color_params:
                if c:
                    # Dark grey for scatter
                    colors.append('#000000')
                else:
                    colors.append(colour_list[ci])
                    ci += 1
    elif isinstance(colors, str):
        colors = [colors] * len(self.chains)
    colors = self.color_finder.get_formatted(colors)
    # Determine linestyles
    if linestyles is None:
        i = 0
        linestyles = []
        for c in color_params:
            if c is None:
                linestyles.append(self._linestyles[0])
            else:
                linestyles.append(self._linestyles[i])
                i = (i + 1) % len(self._linestyles)
    elif isinstance(linestyles, str):
        linestyles = [linestyles] * len(self.chains)
    # Determine linewidths
    if linewidths is None:
        linewidths = [1.0] * len(self.chains)
    elif isinstance(linewidths, float) or isinstance(linewidths, int):
        linewidths = [linewidths] * len(self.chains)
    # Determine clouds
    if cloud is None:
        cloud = False
    cloud = [cloud or c is not None for c in color_params]
    # Determine cloud points
    if num_cloud is None:
        num_cloud = 30000
    if isinstance(num_cloud, int) or isinstance(num_cloud, float):
        num_cloud = [int(num_cloud)] * num_chains
    # Should we shade the contours
    if shade is None:
        if shade_alpha is None:
            shade = num_chains <= 3
        else:
            shade = True
    if isinstance(shade, bool):
        # If not overridden, do not shade chains with colour scatter points
        shade = [shade and c is None for c in color_params]
    # Modify shade alpha based on how many chains we have
    if shade_alpha is None:
        if num_chains == 1:
            if contour_labels is not None:
                shade_alpha = 0.75
            else:
                shade_alpha = 1.0
        else:
            shade_alpha = 1.0 / num_chains
    # Decrease the shading amount if there are colour scatter points
    if isinstance(shade_alpha, float) or isinstance(shade_alpha, int):
        shade_alpha = [shade_alpha if c is None else 0.25 * shade_alpha for c in color_params]
    if shade_gradient is None:
        shade_gradient = 1.0
    if isinstance(shade_gradient, float):
        shade_gradient = [shade_gradient] * num_chains
    elif isinstance(shade_gradient, list):
        # FIX: format string previously read "% chains" (missing 'd' conversion)
        assert len(shade_gradient) == num_chains, \
            "Have %d shade_gradient but %d chains" % (len(shade_gradient), num_chains)
    contour_over_points = num_chains < 20
    if plot_contour is None:
        plot_contour = [contour_over_points if chain.posterior is not None else True for chain in self.chains]
    elif isinstance(plot_contour, bool):
        plot_contour = [plot_contour] * num_chains
    if plot_point is None:
        plot_point = [not contour_over_points] * num_chains
    elif isinstance(plot_point, bool):
        plot_point = [plot_point] * num_chains
    if marker_style is None:
        marker_style = ['.'] * num_chains
    elif isinstance(marker_style, str):
        marker_style = [marker_style] * num_chains
    if marker_size is None:
        marker_size = [4] * num_chains
    elif isinstance(marker_size, (int, float)):
        # FIX: was `isinstance(marker_style, ...)`, so a scalar marker_size
        # was never broadcast to all chains
        marker_size = [marker_size] * num_chains
    if marker_alpha is None:
        marker_alpha = [1.0] * num_chains
    elif isinstance(marker_alpha, (int, float)):
        marker_alpha = [marker_alpha] * num_chains
    # Figure out if we should display parameter summaries
    if summary is not None:
        summary = summary and num_chains == 1
    # Figure out bar shading
    if bar_shade is None:
        bar_shade = num_chains <= 3
    if isinstance(bar_shade, bool):
        bar_shade = [bar_shade] * num_chains
    # Figure out how many sigmas to plot
    if sigmas is None:
        if num_chains == 1:
            sigmas = np.array([0, 1, 2])
        else:
            sigmas = np.array([0, 1, 2])
    if sigmas[0] != 0:
        sigmas = np.concatenate(([0], sigmas))
    sigmas = np.sort(sigmas)
    if contour_labels is not None:
        assert isinstance(contour_labels, str), "contour_labels parameter should be a string"
        contour_labels = contour_labels.lower()
        assert contour_labels in ["sigma", "confidence"], "contour_labels should be either sigma or confidence"
    assert isinstance(contour_label_font_size, int) or isinstance(contour_label_font_size, float), \
        "contour_label_font_size needs to be numeric"
    if legend_artists is None:
        legend_artists = len(set(linestyles)) > 1 or len(set(linewidths)) > 1
    if legend_kwargs is not None:
        assert isinstance(legend_kwargs, dict), "legend_kwargs should be a dict"
    else:
        legend_kwargs = {}
    if num_chains < 3:
        labelspacing = 0.5
    elif num_chains == 3:
        labelspacing = 0.2
    else:
        labelspacing = 0.15
    legend_kwargs_default = {
        "labelspacing": labelspacing,
        "loc": "upper right",
        "frameon": False,
        "fontsize": label_font_size,
        "handlelength": 1,
        "handletextpad": 0.2,
        "borderaxespad": 0.0
    }
    legend_kwargs_default.update(legend_kwargs)
    watermark_text_kwargs_default = {
        "color": "#333333",
        "alpha": 0.7,
        "verticalalignment": "center",
        "horizontalalignment": "center"
    }
    if watermark_text_kwargs is None:
        watermark_text_kwargs = {}
    watermark_text_kwargs_default.update(watermark_text_kwargs)
    assert isinstance(summary_area, float), "summary_area needs to be a float, not %s!" % type(summary_area)
    assert summary_area > 0, "summary_area should be a positive number, instead is %s!" % summary_area
    assert summary_area < 1, "summary_area must be less than unity, instead is %s!" % summary_area
    assert isinstance(global_point, bool), "global_point should be a bool"
    # List options
    for i, c in enumerate(self.chains):
        try:
            c.update_unset_config("statistics", statistics[i], override=explicit)
            c.update_unset_config("color", colors[i], override=explicit)
            c.update_unset_config("linestyle", linestyles[i], override=explicit)
            c.update_unset_config("linewidth", linewidths[i], override=explicit)
            c.update_unset_config("cloud", cloud[i], override=explicit)
            c.update_unset_config("shade", shade[i], override=explicit)
            c.update_unset_config("shade_alpha", shade_alpha[i], override=explicit)
            c.update_unset_config("shade_gradient", shade_gradient[i], override=explicit)
            c.update_unset_config("bar_shade", bar_shade[i], override=explicit)
            c.update_unset_config("bins", bins[i], override=explicit)
            c.update_unset_config("kde", kde[i], override=explicit)
            c.update_unset_config("smooth", smooth[i], override=explicit)
            c.update_unset_config("color_params", color_params[i], override=explicit)
            c.update_unset_config("plot_color_params", plot_color_params[i], override=explicit)
            c.update_unset_config("cmap", cmaps[i], override=explicit)
            c.update_unset_config("num_cloud", num_cloud[i], override=explicit)
            c.update_unset_config("marker_style", marker_style[i], override=explicit)
            c.update_unset_config("marker_size", marker_size[i], override=explicit)
            c.update_unset_config("marker_alpha", marker_alpha[i], override=explicit)
            c.update_unset_config("plot_contour", plot_contour[i], override=explicit)
            c.update_unset_config("plot_point", plot_point[i], override=explicit)
            c.config["summary_area"] = summary_area
        except IndexError as e:
            # FIX: was `except IndentationError`, which can never be raised here;
            # the indexing above raises IndexError when a per-chain list is short
            print("Index error when assigning chain properties, make sure you "
                  "have enough properties set for the number of chains you have loaded! "
                  "See the stack trace for which config item has the wrong number of entries.")
            raise e
    # Non list options
    self.config["sigma2d"] = sigma2d
    self.config["sigmas"] = sigmas
    self.config["summary"] = summary
    self.config["flip"] = flip
    self.config["serif"] = serif
    self.config["plot_hists"] = plot_hists
    self.config["max_ticks"] = max_ticks
    self.config["usetex"] = usetex
    self.config["diagonal_tick_labels"] = diagonal_tick_labels
    self.config["label_font_size"] = label_font_size
    self.config["tick_font_size"] = tick_font_size
    self.config["spacing"] = spacing
    self.config["contour_labels"] = contour_labels
    self.config["contour_label_font_size"] = contour_label_font_size
    self.config["legend_location"] = legend_location
    self.config["legend_kwargs"] = legend_kwargs_default
    self.config["legend_artists"] = legend_artists
    self.config["legend_color_text"] = legend_color_text
    self.config["watermark_text_kwargs"] = watermark_text_kwargs_default
    self.config["global_point"] = global_point
    self._configured = True
    return self
|
r""" Configure the general plotting parameters common across the bar
and contour plots.
If you do not call this explicitly, the :func:`plot`
method will invoke this method automatically.
Please ensure that you call this method *after* adding all the relevant data to the
chain consumer, as the consume changes configuration values depending on
the supplied data.
Parameters
----------
statistics : string|list[str], optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
max_ticks : int, optional
The maximum number of ticks to use on the plots
plot_hists : bool, optional
Whether to plot marginalised distributions or not
flip : bool, optional
Set to false if, when plotting only two parameters, you do not want it to
rotate the histogram so that it is horizontal.
sigma2d: bool, optional
Defaults to `False`. When `False`, uses :math:`\sigma` levels for 1D Gaussians - ie confidence
levels of 68% and 95%. When `True`, uses the confidence levels for 2D Gaussians, where 1 and 2
:math:`\sigma` represents 39% and 86% confidence levels respectively.
sigmas : np.array, optional
The :math:`\sigma` contour levels to plot. Defaults to [0, 1, 2, 3] for a single chain
and [0, 1, 2] for multiple chains.
serif : bool, optional
Whether to display ticks and labels with serif font.
summary : bool, optional
If overridden, sets whether parameter summaries should be set as axis titles.
Will not work if you have multiple chains
bins : int|float,list[int|float], optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE.
rainbow : bool|list[bool], optional
Set to True to force use of rainbow colours
colors : str(hex)|list[str(hex)], optional
Provide a list of colours to use for each chain. If you provide more chains
than colours, you *will* get the rainbow colour spectrum. If you only pass
one colour, all chains are set to this colour. This probably won't look good.
linestyles : str|list[str], optional
Provide a list of line styles to plot the contours and marginalised
distributions with. By default, this will become a list of solid lines. If a
string is passed instead of a list, this style is used for all chains.
linewidths : float|list[float], optional
Provide a list of line widths to plot the contours and marginalised
distributions with. By default, this is a width of 1. If a float
is passed instead of a list, this width is used for all chains.
kde : bool|float|list[bool|float], optional
Whether to use a Gaussian KDE to smooth marginalised posteriors. If false, uses
bins and linear interpolation, so ensure you have plenty of samples if your
distribution is highly non-gaussian. Due to the slowness of performing a
KDE on all data, it is often useful to disable this before producing final
plots. If float, scales the width of the KDE bandpass manually.
smooth : int|list[int], optional
Defaults to 3. How much to smooth the marginalised distributions using a gaussian filter.
If ``kde`` is set to true, this parameter is ignored. Setting it to either
``0``, ``False`` disables smoothing. For grid data, smoothing
is set to 0 by default, not 3.
cloud : bool|list[bool], optional
If set, overrides the default behaviour and plots the cloud or not
shade : bool|list[bool] optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float|list[float], optional
Filled contour alpha value override. Default is 1.0. If a list is passed, you can set the
shade opacity for specific chains.
shade_gradient : float|list[float], optional
How much to vary colours in different contour levels.
bar_shade : bool|list[bool], optional
If set to true, shades in confidence regions in under histogram. By default
this happens if you less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
num_cloud : int|list[int], optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
color_params : str|list[str], optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool|list[bool], optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str|list[str], optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specific a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
plot_contour : bool|list[bool], optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool|list[bool], optional
Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains.
global_point : bool, optional
Whether the point which gets plotted is the global posterior maximum, or the marginalised 2D
posterior maximum. Note that when you use marginalised 2D maximums for the points, you do not
get the 1D histograms. Defaults to `True`, for a global maximum value.
marker_style : str|list[str], optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric|list[numeric], optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric|list[numeric], optional
The alpha values when plotting markers.
usetex : bool, optional
Whether or not to parse text as LaTeX in plots.
diagonal_tick_labels : bool, optional
Whether to display tick labels on a 45 degree angle.
label_font_size : int|float, optional
The font size for plot axis labels and axis titles if summaries are configured to display.
tick_font_size : int|float, optional
The font size for the tick labels in the plots.
spacing : float, optional
The amount of spacing to add between plots. Defaults to `None`, which equates to 1.0 for less
than 6 dimensions and 0.0 for higher dimensions.
contour_labels : string, optional
If unset do not plot contour labels. If set to "confidence", label the using confidence
intervals. If set to "sigma", labels using sigma.
contour_label_font_size : int|float, optional
The font size for contour labels, if they are enabled.
legend_kwargs : dict, optional
Extra arguments to pass to the legend api.
legend_location : tuple(int,int), optional
Specifies the subplot in which to locate the legend. By default, this will be (0, -1),
corresponding to the top right subplot if there are more than two parameters,
and the bottom left plot for only two parameters with flip on.
For having the legend in the primary subplot
in the bottom left, set to (-1,0).
legend_artists : bool, optional
Whether to include hide artists in the legend. If all linestyles and line widths are identical,
this will default to false (as only the colours change). Otherwise it will be true.
legend_color_text : bool, optional
Whether to colour the legend text.
watermark_text_kwargs : dict, optional
Options to pass to the fontdict property when generating text for the watermark.
summary_area : float, optional
The confidence interval used when generating parameter summaries. Defaults to 1 sigma, aka 0.6827
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
|
entailment
|
def configure_truth(self, **kwargs):  # pragma: no cover
    """ Configure the arguments passed to the ``axvline`` and ``axhline``
    methods when plotting truth values.

    Invoked automatically by :func:`plot` if not called explicitly. For basic
    control, set ``linestyle``, ``color`` and/or ``alpha``; when nothing is
    supplied, an opaque black dashed line is used.

    Parameters
    ----------
    kwargs : dict
        The keyword arguments to unwrap when calling ``axvline`` and ``axhline``.

    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    style_given = kwargs.get("ls") is not None or kwargs.get("linestyle") is not None
    if not style_given:
        # Default to a dashed line with an explicit dash pattern
        kwargs["ls"] = "--"
        kwargs["dashes"] = (3, 3)
    if kwargs.get("color") is None:
        kwargs["color"] = "#000000"
    self.config_truth = kwargs
    self._configured_truth = True
    return self
|
Configure the arguments passed to the ``axvline`` and ``axhline``
methods when plotting truth values.
If you do not call this explicitly, the :func:`plot` method will
invoke this method automatically.
Recommended to set the parameters ``linestyle``, ``color`` and/or ``alpha``
if you want some basic control.
Default is to use an opaque black dashed line.
Parameters
----------
kwargs : dict
The keyword arguments to unwrap when calling ``axvline`` and ``axhline``.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
|
entailment
|
def divide_chain(self, chain=0):
    """
    Returns a ChainConsumer instance containing all the walks of a given chain
    as individual chains themselves.

    Useful for checking walker convergence: for example, a chain sampled with
    4 walkers can be split into 4 separate chains, and plotting them should
    show the contours in agreement.

    Parameters
    ----------
    chain : int|str, optional
        The index or name of the chain you want divided

    Returns
    -------
    ChainConsumer
        A new ChainConsumer instance with the same settings as the parent instance, containing
        ``num_walker`` chains.
    """
    con = ChainConsumer()
    for index in self._get_chain(chain):
        source = self.chains[index]
        assert source.walkers is not None, "The chain you have selected was not added with any walkers!"
        # Split both samples and weights into one slice per walker
        sample_splits = np.split(source.chain, source.walkers)
        weight_splits = np.split(source.weights, source.walkers)
        for j, (c, w) in enumerate(zip(sample_splits, weight_splits)):
            con.add_chain(c, weights=w, name="Chain %d" % j, parameters=source.parameters)
    return con
|
Returns a ChainConsumer instance containing all the walks of a given chain
as individual chains themselves.
This method might be useful if, for example, your chain was made using
MCMC with 4 walkers. To check the sampling of all 4 walkers agree, you could
call this to get a ChainConsumer instance with one chain for ech of the
four walks. If you then plot, hopefully all four contours
you would see agree.
Parameters
----------
chain : int|str, optional
The index or name of the chain you want divided
Returns
-------
ChainConsumer
A new ChainConsumer instance with the same settings as the parent instance, containing
``num_walker`` chains.
|
entailment
|
def threshold(args):
    """Calculate motif score threshold for a given FPR."""
    fpr = args.fpr
    if not 0 <= fpr <= 1:
        print("Please specify a FPR between 0 and 1")
        sys.exit(1)

    motifs = read_motifs(args.pwmfile)

    scanner = Scanner()
    scanner.set_motifs(args.pwmfile)
    scanner.set_threshold(fpr, filename=args.inputfile)

    print("Motif\tScore\tCutoff")
    for motif in motifs:
        lo = motif.pwm_min_score()
        hi = motif.pwm_max_score()
        cutoff = scanner.threshold[motif.id]
        # No threshold could be determined: fall back to the maximum score.
        if cutoff is None:
            cutoff = motif.pwm_max_score()
        relative = (cutoff - lo) / (hi - lo)
        print("{0}\t{1}\t{2}".format(motif.id, cutoff, relative))
|
Calculate motif score threshold for a given FPR.
|
entailment
|
def values_to_labels(fg_vals, bg_vals):
    """
    Convert two arrays of values to an array of labels and an array of scores.

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.

    Returns
    -------
    y_true : array
        Labels.
    y_score : array
        Values.
    """
    # Positives are labeled 1, negatives 0; scores are concatenated in
    # the same order (foreground first).
    labels = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
    scores = np.hstack((fg_vals, bg_vals))
    return labels, scores
|
Convert two arrays of values to an array of labels and an array of scores.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
y_true : array
Labels.
y_score : array
Values.
|
entailment
|
def recall_at_fdr(fg_vals, bg_vals, fdr_cutoff=0.1):
    """
    Computes the recall at a specific FDR (default 10%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fdr_cutoff : float, optional
        The FDR (between 0.0 and 1.0).

    Returns
    -------
    recall : float
        The recall at the specified FDR.
    """
    if len(fg_vals) == 0:
        return 0.0

    labels, scores = values_to_labels(fg_vals, bg_vals)

    precision, recall, _ = precision_recall_curve(labels, scores)
    # FDR is the complement of precision; take the first curve point
    # that satisfies the requested cutoff.
    cutoff_index = next(
        i for i, f in enumerate(1 - precision) if f <= fdr_cutoff
    )
    return recall[cutoff_index]
|
Computes the recall at a specific FDR (default 10%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fdr : float, optional
The FDR (between 0.0 and 1.0).
Returns
-------
recall : float
The recall at the specified FDR.
|
entailment
|
def matches_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Computes the number of matches at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    counts : list
        [foreground matches, background matches] at the score threshold
        corresponding to the specified FPR.
    """
    fg_vals = np.array(fg_vals)
    # Robustness fix: also convert bg_vals, so plain lists work
    # (``list >= float`` raises TypeError in Python 3).
    bg_vals = np.array(bg_vals)
    # Score threshold: the (1 - fpr) percentile of the background scores.
    s = scoreatpercentile(bg_vals, 100 - fpr * 100)
    return [sum(fg_vals >= s), sum(bg_vals >= s)]
|
Computes the hypergeometric p-value at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
fraction : float
The fraction positives at the specified FPR.
|
entailment
|
def phyper_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Computes the hypergeometric p-value at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    p : float
        One-sided (greater) Fisher exact p-value of the foreground vs
        background matches at the specified FPR.  (The original docstring
        incorrectly described the return value as a fraction.)
    """
    fg_vals = np.array(fg_vals)
    # Robustness fix: also convert bg_vals, so plain lists work
    # (``list >= float`` raises TypeError in Python 3).
    bg_vals = np.array(bg_vals)
    # Score threshold: the (1 - fpr) percentile of the background scores.
    s = scoreatpercentile(bg_vals, 100 - fpr * 100)
    # 2x2 contingency table of matches / non-matches per set.
    table = [
        [sum(fg_vals >= s), sum(bg_vals >= s)],
        [sum(fg_vals < s), sum(bg_vals < s)],
    ]
    return fisher_exact(table, alternative="greater")[1]
|
Computes the hypergeometric p-value at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
fraction : float
The fraction positives at the specified FPR.
|
entailment
|
def fraction_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Computes the fraction positives at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    fraction : float
        The fraction positives at the specified FPR.
    """
    scores = np.array(fg_vals)
    # Score threshold: the (1 - fpr) percentile of the background scores.
    cutoff = scoreatpercentile(bg_vals, 100 - 100 * fpr)
    n_match = (scores >= cutoff).sum()
    return n_match / float(len(scores))
|
Computes the fraction positives at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
fraction : float
The fraction positives at the specified FPR.
|
entailment
|
def score_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Returns the motif score at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    score : float
        The motif score at the specified FPR.
    """
    # Only the background distribution determines the threshold score.
    background = np.array(bg_vals)
    return scoreatpercentile(background, 100 - 100 * fpr)
|
Returns the motif score at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
score : float
The motif score at the specified FPR.
|
entailment
|
def enr_at_fpr(fg_vals, bg_vals, fpr=0.01):
    """
    Computes the enrichment at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    enrichment : float
        The enrichment at the specified FPR.
    """
    fg = np.array(fg_vals)
    bg = np.array(bg_vals)
    # Score threshold: the (1 - fpr) percentile of the background scores.
    cutoff = scoreatpercentile(bg, 100 - fpr * 100)
    n_bg = float((bg >= cutoff).sum())
    # Guard against division by zero when nothing in the background matches.
    if n_bg == 0:
        return float("inf")
    n_fg = (fg >= cutoff).sum()
    # Enrichment: ratio of the match fractions in foreground vs background.
    return n_fg / n_bg * len(bg) / float(len(fg))
|
Computes the enrichment at a specific FPR (default 1%).
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
fpr : float, optional
The FPR (between 0.0 and 1.0).
Returns
-------
enrichment : float
The enrichment at the specified FPR.
|
entailment
|
def max_enrichment(fg_vals, bg_vals, minbg=2):
    """
    Computes the maximum enrichment.

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.
    minbg : int, optional
        Minimum number of matches in background. The default is 2.

    Returns
    -------
    enrichment : float
        Maximum enrichment over all score thresholds with at least
        ``minbg`` background matches (0 if no threshold qualifies).
    """
    scores = np.hstack((fg_vals, bg_vals))
    idx = np.argsort(scores)
    # Labels: 1 = foreground, 0 = background, reordered by ascending score.
    x = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
    xsort = x[idx]

    l_fg = len(fg_vals)
    l_bg = len(bg_vals)
    m = 0
    # Sweep thresholds from high to low; xsort[i:] are the labels of all
    # values >= the i-th smallest score.
    # (The original also tracked the best threshold score in a dead local
    # variable ``s``; it was never used and has been removed.)
    for i in range(len(scores), 0, -1):
        bgcount = float(len(xsort[i:][xsort[i:] == 0]))
        if bgcount >= minbg:
            enr = (len(xsort[i:][xsort[i:] == 1]) / l_fg) / (bgcount / l_bg)
            if enr > m:
                m = enr
    return m
|
Computes the maximum enrichment.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
minbg : int, optional
Minimum number of matches in background. The default is 2.
Returns
-------
enrichment : float
Maximum enrichment.
|
entailment
|
def mncp(fg_vals, bg_vals):
    """
    Computes the Mean Normalized Conditional Probability (MNCP).

    MNCP is described in Clarke & Granek, Bioinformatics, 2003.

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.

    Returns
    -------
    score : float
        MNCP score
    """
    if not isinstance(fg_vals, np.ndarray):
        fg_vals = np.array(fg_vals)
    if not isinstance(bg_vals, np.ndarray):
        bg_vals = np.array(bg_vals)

    fg_len = len(fg_vals)
    total_len = fg_len + len(bg_vals)

    # Ranks within the foreground and within the combined set; the first
    # fg_len entries of the combined ranks correspond to the foreground.
    fg_rank = stats.rankdata(fg_vals)
    total_rank = stats.rankdata(np.hstack((fg_vals, bg_vals)))

    # Slope of the CP curve at each foreground value (vectorized form of
    # the original per-element loop).
    slopes = ((fg_len - fg_rank + 1) / fg_len) / (
        (total_len - total_rank[:fg_len] + 1) / total_len
    )
    return np.mean(slopes)
|
Computes the Mean Normalized Conditional Probability (MNCP).
MNCP is described in Clarke & Granek, Bioinformatics, 2003.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
score : float
MNCP score
|
entailment
|
def pr_auc(fg_vals, bg_vals):
    """
    Computes the Precision-Recall Area Under Curve (PR AUC)

    Parameters
    ----------
    fg_vals : array_like
        list of values for positive set
    bg_vals : array_like
        list of values for negative set

    Returns
    -------
    score : float
        PR AUC score
    """
    # Turn the two value sets into labels + scores, then delegate to sklearn.
    labels, scores = values_to_labels(fg_vals, bg_vals)
    return average_precision_score(labels, scores)
|
Computes the Precision-Recall Area Under Curve (PR AUC)
Parameters
----------
fg_vals : array_like
list of values for positive set
bg_vals : array_like
list of values for negative set
Returns
-------
score : float
PR AUC score
|
entailment
|
def roc_auc(fg_vals, bg_vals):
    """
    Computes the ROC Area Under Curve (ROC AUC)

    Parameters
    ----------
    fg_vals : array_like
        list of values for positive set
    bg_vals : array_like
        list of values for negative set

    Returns
    -------
    score : float
        ROC AUC score
    """
    # Turn the two value sets into labels + scores, then delegate to sklearn.
    labels, scores = values_to_labels(fg_vals, bg_vals)
    return roc_auc_score(labels, scores)
|
Computes the ROC Area Under Curve (ROC AUC)
Parameters
----------
fg_vals : array_like
list of values for positive set
bg_vals : array_like
list of values for negative set
Returns
-------
score : float
ROC AUC score
|
entailment
|
def roc_auc_xlim(x_bla, y_bla, xlim=0.1):
    """
    Computes the ROC Area Under Curve until a certain FPR value.

    Parameters
    ----------
    x_bla : array_like
        Values for the positive set (the original docstring named this
        ``fg_vals``).
    y_bla : array_like
        Values for the negative set (the original docstring named this
        ``bg_vals``).
    xlim : float, optional
        FPR value at which the curve is truncated.

    Returns
    -------
    score : float
        Partial ROC AUC score.
    """
    # Work on copies so the caller's lists are not modified by the pops below.
    x = x_bla[:]
    y = y_bla[:]
    x.sort()
    y.sort()
    # Unique set of all observed values, used as candidate thresholds.
    u = {}
    for i in x + y:
        u[i] = 1
    vals = sorted(u.keys())
    len_x = float(len(x))
    len_y = float(len(y))
    new_x = []
    new_y = []
    x_p = 0
    y_p = 0
    # Sweep thresholds from high to low, counting how many values of each
    # set are at or above the threshold, to build the curve points.
    for val in vals[::-1]:
        while len(x) > 0 and x[-1] >= val:
            x.pop()
            x_p += 1
        while len(y) > 0 and y[-1] >= val:
            y.pop()
            y_p += 1
        new_y.append((len_x - x_p) / len_x)
        new_x.append((len_y - y_p) / len_y)
    # Convert "fraction below threshold" into (FPR, TPR) coordinates.
    new_x = 1 - np.array(new_x)
    new_y = 1 - np.array(new_y)
    x = new_x
    y = new_y
    if len(x) != len(y):
        raise ValueError("Unequal!")
    if not xlim:
        xlim = 1.0
    auc = 0.0
    bla = zip(stats.rankdata(x), range(len(x)))
    bla = sorted(bla, key=lambda x: x[1])
    prev_x = x[bla[0][1]]
    prev_y = y[bla[0][1]]
    index = 1
    # Trapezoidal integration of the curve up to the FPR limit.
    while index < len(bla) and x[bla[index][1]] <= xlim:
        _, i = bla[index]
        auc += y[i] * (x[i] - prev_x) - ((x[i] - prev_x) * (y[i] - prev_y) / 2.0)
        prev_x = x[i]
        prev_y = y[i]
        index += 1
    if index < len(bla):
        (rank, i) = bla[index]
        # Add the final partial trapezoid that straddles the xlim boundary.
        auc += prev_y * (xlim - prev_x) + ((y[i] - prev_y)/(x[i] - prev_x) * (xlim -prev_x) * (xlim - prev_x)/2)
    return auc
|
Computes the ROC Area Under Curve until a certain FPR value.
Parameters
----------
fg_vals : array_like
list of values for positive set
bg_vals : array_like
list of values for negative set
xlim : float, optional
FPR value
Returns
-------
score : float
ROC AUC score
|
entailment
|
def roc_values(fg_vals, bg_vals):
    """
    Return fpr (x) and tpr (y) of the ROC curve.

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.

    Returns
    -------
    fpr : array
        False positive rate.
    tpr : array
        True positive rate.
    """
    # Bug fix: the original returned a bare 0 for empty input, which broke
    # tuple-unpacking callers such as max_fmeasure. Return a consistent
    # (fpr, tpr) pair of empty arrays instead.
    if len(fg_vals) == 0:
        return np.array([]), np.array([])

    y_true, y_score = values_to_labels(fg_vals, bg_vals)
    fpr, tpr, _thresholds = roc_curve(y_true, y_score)
    return fpr, tpr
|
Return fpr (x) and tpr (y) of the ROC curve.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
fpr : array
False positive rate.
tpr : array
True positive rate.
|
entailment
|
def max_fmeasure(fg_vals, bg_vals):
    """
    Computes the maximum F-measure.

    Parameters
    ----------
    fg_vals : array_like
        The list of values for the positive set.
    bg_vals : array_like
        The list of values for the negative set.

    Returns
    -------
    f : float
        Maximum f-measure (None if no valid curve point exists).
    """
    fpr, tpr = roc_values(fg_vals, bg_vals)
    # Drop the origin point of the ROC curve.
    fpr, tpr = fpr[1:], tpr[1:]

    precision = tpr / (tpr + fpr)
    # Keep only points with strictly positive precision and recall.
    valid = np.logical_and((precision * tpr) > 0, (precision + tpr) > 0)
    precision = precision[valid]
    tpr = tpr[valid]

    fmeasure = (2 * precision * tpr) / (precision + tpr)
    if len(fmeasure) == 0:
        return None
    return np.nanmax(fmeasure)
|
Computes the maximum F-measure.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
f : float
Maximum f-measure.
|
entailment
|
def ks_pvalue(fg_pos, bg_pos=None):
    """
    Computes the Kolmogorov-Smirnov p-value of position distribution.

    Parameters
    ----------
    fg_pos : array_like
        The list of values for the positive set.
    bg_pos : array_like, optional
        The list of values for the negative set (currently unused).

    Returns
    -------
    p : float
        KS p-value.
    """
    if len(fg_pos) == 0:
        return 1.0
    # Scale positions to [0, 1] and test against a uniform distribution.
    scaled = np.array(fg_pos, dtype="float") / max(fg_pos)
    return kstest(scaled, "uniform")[1]
|
Computes the Kolmogorov-Smirnov p-value of position distribution.
Parameters
----------
fg_pos : array_like
The list of values for the positive set.
bg_pos : array_like, optional
The list of values for the negative set.
Returns
-------
p : float
KS p-value.
|
entailment
|
def ks_significance(fg_pos, bg_pos=None):
    """
    Computes the -log10 of Kolmogorov-Smirnov p-value of position distribution.

    Parameters
    ----------
    fg_pos : array_like
        The list of values for the positive set.
    bg_pos : array_like, optional
        The list of values for the negative set.

    Returns
    -------
    p : float
        -log10(KS p-value).
    """
    # Bug fix: the original called ks_pvalue(fg_pos, max(fg_pos)), which
    # raised ValueError on empty input (bypassing ks_pvalue's own empty-set
    # guard) and passed a meaningless scalar as bg_pos.
    p = ks_pvalue(fg_pos, bg_pos)
    if p > 0:
        return -np.log10(p)
    return np.inf
|
Computes the -log10 of Kolmogorov-Smirnov p-value of position distribution.
Parameters
----------
fg_pos : array_like
The list of values for the positive set.
bg_pos : array_like, optional
The list of values for the negative set.
Returns
-------
p : float
-log10(KS p-value).
|
entailment
|
def setup_data():
    """Load and shape data for training with Keras + Pescador.

    Returns
    -------
    input_shape : tuple, len=3
        Shape of each sample; adapts to channel configuration of Keras.
    X_train, y_train : np.ndarrays
        Images and labels for training.
    X_test, y_test : np.ndarrays
        Images and labels for test.
    """
    # The data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Reshape to match the backend's expected channel ordering
    # (channels-first vs channels-last).
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
    # Scale pixel values from [0, 255] to [0, 1].
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    return input_shape, (x_train, y_train), (x_test, y_test)
|
Load and shape data for training with Keras + Pescador.
Returns
-------
input_shape : tuple, len=3
Shape of each sample; adapts to channel configuration of Keras.
X_train, y_train : np.ndarrays
Images and labels for training.
X_test, y_test : np.ndarrays
Images and labels for test.
|
entailment
|
def build_model(input_shape):
    """Create a compiled Keras model.

    Parameters
    ----------
    input_shape : tuple, len=3
        Shape of each image sample.

    Returns
    -------
    model : keras.Model
        Constructed model.
    """
    # Two convolutional layers, max-pooling, then a dense classifier head,
    # with dropout for regularization.
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, kernel_size=(3, 3),
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
|
Create a compiled Keras model.
Parameters
----------
input_shape : tuple, len=3
Shape of each image sample.
Returns
-------
model : keras.Model
Constructed model.
|
entailment
|
def sampler(X, y):
    '''A basic generator for sampling data.

    Parameters
    ----------
    X : np.ndarray, len=n_samples, ndim=4
        Image data.
    y : np.ndarray, len=n_samples, ndim=2
        One-hot encoded class vectors.

    Yields
    ------
    data : dict
        Single image sample, like {X: np.ndarray, y: np.ndarray}
    '''
    X = np.atleast_2d(X)
    # y's are binary vectors, and should be of shape (10,) after this.
    y = np.atleast_1d(y)

    n_samples = X.shape[0]
    # Sample uniformly at random, forever.
    while True:
        idx = np.random.randint(0, n_samples)
        yield dict(X=X[idx], y=y[idx])
|
A basic generator for sampling data.
Parameters
----------
X : np.ndarray, len=n_samples, ndim=4
Image data.
y : np.ndarray, len=n_samples, ndim=2
One-hot encoded class vectors.
Yields
------
data : dict
Single image sample, like {X: np.ndarray, y: np.ndarray}
|
entailment
|
def additive_noise(stream, key='X', scale=1e-1):
    '''Add noise to a data stream.

    Parameters
    ----------
    stream : iterable
        A stream that yields data objects.
    key : string, default='X'
        Name of the field to add noise.
    scale : float, default=0.1
        Scale factor for gaussian noise.

    Yields
    ------
    data : dict
        Updated data objects in the stream.
    '''
    for sample in stream:
        # Gaussian noise with the same shape as the target field.
        perturbation = scale * np.random.randn(*sample[key].shape)
        sample[key] = sample[key] + perturbation
        yield sample
|
Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream.
|
entailment
|
def parse_denovo_params(user_params=None):
    """Return default GimmeMotifs parameters.

    Defaults will be replaced with parameters defined in user_params.

    Parameters
    ----------
    user_params : dict, optional
        User-defined parameters.

    Returns
    -------
    params : dict
    """
    config = MotifConfig()

    if user_params is None:
        user_params = {}
    params = config.get_default_params()
    params.update(user_params)

    if params.get("torque"):
        logger.debug("Using torque")
    else:
        logger.debug("Using multiprocessing")

    # "background" is stored as a comma-separated string; split it here.
    params["background"] = [x.strip() for x in params["background"].split(",")]

    logger.debug("Parameters:")
    for param, value in params.items():
        logger.debug("  %s: %s", param, value)

    # Maximum time?
    if params["max_time"]:
        try:
            max_time = params["max_time"] = float(params["max_time"])
        except Exception:
            logger.debug("Could not parse max_time value, setting to no limit")
            params["max_time"] = -1

        if params["max_time"] > 0:
            logger.debug("Time limit for motif prediction: %0.2f hours", max_time)
            # Convert hours to seconds for downstream consumers.
            params["max_time"] = 3600 * params["max_time"]
            # Bug fix: log the converted value in seconds; the original
            # logged the hours value here.
            logger.debug("Max_time in seconds %0.0f", params["max_time"])
        else:
            logger.debug("No time limit for motif prediction")

    return params
|
Return default GimmeMotifs parameters.
Defaults will be replaced with parameters defined in user_params.
Parameters
----------
user_params : dict, optional
User-defined parameters.
Returns
-------
params : dict
|
entailment
|
def rankagg_R(df, method="stuart"):
    """Return aggregated ranks as implemented in the RobustRankAgg R package.

    This function is now deprecated.

    References:
    Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709
    Stuart et al., 2003, DOI: 10.1126/science.1087447

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame with values to be ranked and aggregated
    method : str, optional
        Aggregation method passed to aggregateRanks (default "stuart").

    Returns
    -------
    pandas.DataFrame with aggregated ranks
    """
    tmpdf = NamedTemporaryFile()
    tmpscript = NamedTemporaryFile(mode="w")
    tmpranks = NamedTemporaryFile()

    df.to_csv(tmpdf.name, sep="\t",index=False)

    # Generate and run an R script that calls RobustRankAggreg on the
    # temporary table, writing adjusted p-values to another temp file.
    script = '''
    library(RobustRankAggreg);
    a = read.table("{}", header=TRUE);
    x = lapply(a, as.vector);
    result = aggregateRanks(x, method="{}");
    result$p.adjust = p.adjust(result$Score);
    write.table(result, file="{}", sep="\t", quote=FALSE, row.names=FALSE);
    '''.format(tmpdf.name, method, tmpranks.name)
    tmpscript.write(script)
    tmpscript.flush()

    p = sp.Popen(["Rscript", tmpscript.name], stdout=sp.PIPE, stderr=sp.PIPE)
    # Bug fix: Popen.communicate() returns (stdout, stderr); the original
    # unpacked them in the wrong order (harmless only because both were
    # unused, but misleading).
    stdout, stderr = p.communicate()

    df = pd.read_table(tmpranks.name, index_col=0)
    return df["p.adjust"]
|
Return aggregated ranks as implemented in the RobustRankAgg R package.
This function is now deprecated.
References:
Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709
Stuart et al., 2003, DOI: 10.1126/science.1087447
Parameters
----------
df : pandas.DataFrame
DataFrame with values to be ranked and aggregated
Returns
-------
pandas.DataFrame with aggregated ranks
|
entailment
|
def rankagg(df, method="stuart"):
    """Return aggregated ranks.

    Implementation is ported from the RobustRankAggreg R package

    References:
    Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709
    Stuart et al., 2003, DOI: 10.1126/science.1087447

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame with values to be ranked and aggregated
    method : str, optional
        NOTE(review): this argument is currently unused in this
        implementation.

    Returns
    -------
    pandas.DataFrame with aggregated ranks
    """
    # Use the first column's entries as the universe of items to rank.
    rmat = pd.DataFrame(index=df.iloc[:,0])

    step = 1 / rmat.shape[0]
    for col in df.columns:
        # Normalized rank in (0, 1] of each item within this column.
        rmat[col] = pd.DataFrame({col:np.arange(step, 1 + step, step)}, index=df[col]).loc[rmat.index]

    # Sort each item's normalized ranks across columns, then apply the
    # Stuart aggregation statistic row-wise.
    rmat = rmat.apply(sorted, 1, result_type="expand")
    p = rmat.apply(qStuart, 1)
    # NOTE(review): method="h" selects Holm correction in statsmodels'
    # multipletests — confirm this is the intended adjustment.
    df = pd.DataFrame(
        {"p.adjust":multipletests(p, method="h")[1]},
        index=rmat.index).sort_values('p.adjust')
    return df["p.adjust"]
|
Return aggregated ranks.
Implementation is ported from the RobustRankAggreg R package
References:
Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709
Stuart et al., 2003, DOI: 10.1126/science.1087447
Parameters
----------
df : pandas.DataFrame
DataFrame with values to be ranked and aggregated
Returns
-------
pandas.DataFrame with aggregated ranks
|
entailment
|
def data_gen(n_ops=100):
    """Yield data, while optionally burning compute cycles.

    Parameters
    ----------
    n_ops : int, default=100
        Number of operations to run between yielding data.

    Yields
    ------
    data : dict
        An object which looks like it might come from some
        machine learning problem, with X as features, and y as targets.
    """
    while True:
        features = np.random.uniform(size=(64, 64))
        # Burn n_ops operations per sample before yielding.
        yield {'X': costly_function(features, n_ops),
               'y': np.random.randint(10, size=(1,))}
|
Yield data, while optionally burning compute cycles.
Parameters
----------
n_ops : int, default=100
Number of operations to run between yielding data.
Returns
-------
data : dict
An object which looks like it might come from some
machine learning problem, with X as features, and y as targets.
|
entailment
|
def mp_calc_stats(motifs, fg_fa, bg_fa, bg_name=None):
    """Parallel calculation of motif statistics.

    Parameters
    ----------
    motifs : list
        Motifs to calculate statistics for.
    fg_fa, bg_fa
        Foreground and background FASTA inputs passed to calc_stats.
    bg_name : str, optional
        Name of the background; defaults to "default".

    Returns
    -------
    tuple
        (bg_name, stats dict as returned by calc_stats).
    """
    try:
        stats = calc_stats(motifs, fg_fa, bg_fa, ncpus=1)
    except Exception as e:
        # Bug fix: the original raised *before* writing the error message,
        # leaving the write and the empty-dict fallback as dead code.
        # Log first, then propagate the error.
        sys.stderr.write("ERROR: {}\n".format(str(e)))
        raise

    if not bg_name:
        bg_name = "default"

    return bg_name, stats
|
Parallel calculation of motif statistics.
|
entailment
|
def _run_tool(job_name, t, fastafile, params):
    """Parallel motif prediction."""
    try:
        outcome = t.run(fastafile, params, mytmpdir())
    except Exception as e:
        # Report the failure as an empty result instead of crashing
        # the worker pool.
        outcome = ([], "", "{} failed to run: {}".format(job_name, e))
    return job_name, outcome
|
Parallel motif prediction.
|
entailment
|
def pp_predict_motifs(fastafile, outfile, analysis="small", organism="hg18", single=False, background="", tools=None, job_server=None, ncpus=8, max_time=-1, stats_fg=None, stats_bg=None):
    """Parallel prediction of motifs.

    Utility function for gimmemotifs.denovo.gimme_motifs. Probably better to
    use that, instead of this function directly.
    """
    if tools is None:
        tools = {}

    config = MotifConfig()

    if not tools:
        # Bug fix: get_default_params is a method and must be called; the
        # original subscripted the bound method itself, raising TypeError.
        tools = dict([(x, 1) for x in config.get_default_params()["tools"].split(",")])

    # Motif width range depends on the analysis size.
    wmin = 5
    step = 1
    if analysis in ["large", "xl"]:
        step = 2
        wmin = 6

    analysis_max = {"xs": 5, "small": 8, "medium": 10, "large": 14, "xl": 20}
    wmax = analysis_max[analysis]

    if analysis == "xs":
        sys.stderr.write("Setting analysis xs to small")
        analysis = "small"

    if not job_server:
        # NOTE(review): the ncpus argument is ignored here in favour of the
        # configured default — confirm whether that is intentional.
        n_cpus = int(config.get_default_params()["ncpus"])
        job_server = Pool(processes=n_cpus, maxtasksperchild=1000)

    jobs = {}

    result = PredictionResult(
        outfile,
        fg_file=stats_fg,
        background=stats_bg,
        job_server=job_server,
    )

    # Dynamically load all motif tools (subclasses of MotifProgram).
    toolio = [x[1]() for x in inspect.getmembers(
        tool_classes,
        lambda x:
            inspect.isclass(x) and
            issubclass(x, tool_classes.MotifProgram)
    ) if x[0] != 'MotifProgram']

    # TODO:
    # Add warnings for running time: Weeder, GADEM

    ### Add all jobs to the job_server ###
    params = {
        'analysis': analysis,
        'background': background,
        "single": single,
        "organism": organism
    }

    # Tools that don't use a specified width usually take longer
    # ie. GADEM, XXmotif, MEME
    # Start these first.
    for t in [tool for tool in toolio if not tool.use_width]:
        if t.name in tools and tools[t.name]:
            logger.debug("Starting %s job", t.name)
            job_name = t.name
            jobs[job_name] = job_server.apply_async(
                _run_tool,
                (job_name, t, fastafile, params),
                callback=result.add_motifs)
        else:
            logger.debug("Skipping %s", t.name)

    # Width-aware tools get one job per motif width in [wmin, wmax].
    for t in [tool for tool in toolio if tool.use_width]:
        if t.name in tools and tools[t.name]:
            for i in range(wmin, wmax + 1, step):
                logger.debug("Starting %s job, width %s", t.name, i)
                job_name = "%s_width_%s" % (t.name, i)
                my_params = params.copy()
                my_params['width'] = i
                jobs[job_name] = job_server.apply_async(
                    _run_tool,
                    (job_name, t, fastafile, my_params),
                    callback=result.add_motifs)
        else:
            logger.debug("Skipping %s", t.name)

    logger.info("all jobs submitted")
    for job in jobs.values():
        job.get()

    result.wait_for_stats()

    return result
|
Parallel prediction of motifs.
Utility function for gimmemotifs.denovo.gimme_motifs. Probably better to
use that, instead of this function directly.
|
entailment
|
def predict_motifs(infile, bgfile, outfile, params=None, stats_fg=None, stats_bg=None):
    """ Predict motifs, input is a FASTA-file"""
    # Parse parameters
    required_params = ["tools", "available_tools", "analysis",
                       "genome", "use_strand", "max_time"]
    if params is None:
        params = parse_denovo_params()
    else:
        # Fall back to the full default parameter set if any required
        # key is missing from the user-supplied params.
        for p in required_params:
            if p not in params:
                params = parse_denovo_params()
                break

    # Define all tools: map tool name -> enabled flag.
    # NOTE(review): the membership test uses the unstripped token ``x``
    # while the key is ``x.strip()`` — tools listed with surrounding
    # whitespace in "available_tools" may be mis-detected; confirm.
    tools = dict(
        [
            (x.strip(), x in [y.strip() for y in params["tools"].split(",")])
            for x in params["available_tools"].split(",")
        ]
    )

    # Predict the motifs
    analysis = params["analysis"]
    logger.info("starting motif prediction (%s)", analysis)
    logger.info("tools: %s",
                ", ".join([x for x in tools.keys() if tools[x]]))

    result = pp_predict_motifs(
        infile,
        outfile,
        analysis,
        params.get("genome", None),
        params["use_strand"],
        bgfile,
        tools,
        None,
        max_time=params["max_time"],
        stats_fg=stats_fg,
        stats_bg=stats_bg
    )

    motifs = result.motifs
    logger.info("predicted %s motifs", len(motifs))
    logger.debug("written to %s", outfile)

    if len(motifs) == 0:
        logger.info("no motifs found")
        result.motifs = []

    return result
|
Predict motifs, input is a FASTA-file
|
entailment
|
def add_motifs(self, args):
    """Add motifs to the result object.

    Callback for the motif-prediction jobs: appends the predicted motifs
    to the output file and, if enabled, submits statistics jobs for them.
    """
    self.lock.acquire()
    try:
        # Callback function for motif programs
        if args is None or len(args) != 2 or len(args[1]) != 3:
            try:
                job = args[0]
                logger.warn("job %s failed", job)
                self.finished.append(job)
            except Exception:
                logger.warn("job failed")
            # Bug fix: the original returned here without releasing the
            # lock, deadlocking all subsequent callbacks. The finally
            # block below now releases it on every exit path.
            return

        job, (motifs, stdout, stderr) = args
        logger.info("%s finished, found %s motifs", job, len(motifs))

        for motif in motifs:
            if self.do_counter:
                self.counter += 1
                motif.id = "gimme_{}_".format(self.counter) + motif.id
            # Use a context manager so the file handle is always closed.
            with open(self.outfile, "a") as f:
                f.write("%s\n" % motif.to_pfm())
            self.motifs.append(motif)

        if self.do_stats and len(motifs) > 0:
            logger.debug("Starting stats job of %s motifs", len(motifs))
            for bg_name, bg_fa in self.background.items():
                # Bug fix: the original assigned this AsyncResult to
                # ``job``, shadowing the job name used in the logging and
                # finished-bookkeeping below.
                stat_job = self.job_server.apply_async(
                    mp_calc_stats,
                    (motifs, self.fg_fa, bg_fa, bg_name),
                    callback=self.add_stats
                )
                self.stat_jobs.append(stat_job)

        logger.debug("stdout %s: %s", job, stdout)
        # Bug fix: this line previously labeled stderr output as "stdout".
        logger.debug("stderr %s: %s", job, stderr)

        self.finished.append(job)
    finally:
        self.lock.release()
|
Add motifs to the result object.
|
entailment
|
def wait_for_stats(self):
    """Make sure all statistics jobs are finished."""
    # Consistency fix: use the module-level logger like the sibling
    # methods, instead of the root logging module.
    logger.debug("waiting for statistics to finish")
    for job in self.stat_jobs:
        job.get()
    # Give the stats callbacks a moment to store their results.
    sleep(2)
|
Make sure all jobs are finished.
|
entailment
|
def add_stats(self, args):
    """Callback to add motif statistics."""
    bg_name, stats = args
    logger.debug("Stats: %s %s", bg_name, stats)
    # Store per-motif statistics keyed by background name.
    for motif_id, motif_stats in stats.items():
        self.stats.setdefault(motif_id, {})[bg_name] = motif_stats
|
Callback to add motif statistics.
|
entailment
|
def prepare_denovo_input_narrowpeak(inputfile, params, outdir):
    """Prepare a narrowPeak file for de novo motif prediction.

    All regions to same size; split in test and validation set;
    converted to FASTA.

    Parameters
    ----------
    inputfile : str
        BED file with input regions.
    params : dict
        Dictionary with parameters.
    outdir : str
        Output directory to save files.
    """
    bedfile = os.path.join(outdir, "input.from.narrowpeak.bed")
    # Skip comment, track and browser header lines.
    p = re.compile(r'^(#|track|browser)')
    width = int(params["width"])
    logger.info("preparing input (narrowPeak to BED, width %s)", width)
    warn_no_summit = True
    with open(bedfile, "w") as f_out:
        with open(inputfile) as f_in:
            for line in f_in:
                if p.search(line):
                    continue
                vals = line.strip().split("\t")
                start, end = int(vals[1]), int(vals[2])
                # Column 10 of narrowPeak is the summit offset; -1 means
                # no summit was called.
                summit = int(vals[9])
                if summit == -1:
                    if warn_no_summit:
                        logger.warn("No summit present in narrowPeak file, using the peak center.")
                        warn_no_summit = False
                    summit = (end - start) // 2
                # Center a fixed-width region on the summit.
                start = start + summit - (width // 2)
                end = start + width
                # NOTE(review): vals[6] (narrowPeak signalValue) is written
                # as the 4th BED column here — confirm this is intentional.
                f_out.write("{}\t{}\t{}\t{}\n".format(
                    vals[0],
                    start,
                    end,
                    vals[6]
                ))
    # Delegate the size normalization / split / FASTA conversion.
    prepare_denovo_input_bed(bedfile, params, outdir)
|
Prepare a narrowPeak file for de novo motif prediction.
All regions to same size; split in test and validation set;
converted to FASTA.
Parameters
----------
inputfile : str
BED file with input regions.
params : dict
Dictionary with parameters.
outdir : str
Output directory to save files.
|
entailment
|
def prepare_denovo_input_bed(inputfile, params, outdir):
    """Prepare a BED file for de novo motif prediction.

    All regions are resized to the same width, split into a prediction
    and a validation set, and converted to FASTA. An extended FASTA
    file is also created for the localization (positional preference)
    plots.

    Parameters
    ----------
    inputfile : str
        BED file with input regions.
    params : dict
        Dictionary with parameters ("width", "abs_max", "fraction",
        "lwidth", "genome", "use_strand").
    outdir : str
        Output directory to save files.
    """
    logger.info("preparing input (BED)")
    # Create BED file with regions of equal size
    width = int(params["width"])
    bedfile = os.path.join(outdir, "input.bed")
    write_equalwidth_bedfile(inputfile, width, bedfile)
    abs_max = int(params["abs_max"])
    fraction = float(params["fraction"])
    pred_bedfile = os.path.join(outdir, "prediction.bed")
    val_bedfile = os.path.join(outdir, "validation.bed")
    # Split input into prediction and validation set
    logger.debug(
        "Splitting %s into prediction set (%s) and validation set (%s)",
        bedfile, pred_bedfile, val_bedfile)
    divide_file(bedfile, pred_bedfile, val_bedfile, fraction, abs_max)
    # (removed unused local: config = MotifConfig() was never referenced)
    genome = Genome(params["genome"])
    for infile in [pred_bedfile, val_bedfile]:
        genome.track2fasta(
            infile,
            infile.replace(".bed", ".fa"),
        )
    # Create file for location plots: the validation regions extended on
    # both sides to a total length of lwidth
    lwidth = int(params["lwidth"])
    extend = (lwidth - width) // 2
    genome.track2fasta(
        val_bedfile,
        os.path.join(outdir, "localization.fa"),
        extend_up=extend,
        extend_down=extend,
        stranded=params["use_strand"],
    )
|
Prepare a BED file for de novo motif prediction.
All regions to same size; split in test and validation set;
converted to FASTA.
Parameters
----------
inputfile : str
BED file with input regions.
params : dict
Dictionary with parameters.
outdir : str
Output directory to save files.
|
entailment
|
def prepare_denovo_input_fa(inputfile, params, outdir):
    """Create all the FASTA files for de novo motif prediction and validation.

    The input is split into a prediction and a validation set; the
    validation set is also copied to serve as input for the
    localization plots.

    Parameters
    ----------
    inputfile : str
        FASTA file with input sequences.
    params : dict
        Dictionary with parameters ("fraction", "abs_max").
    outdir : str
        Output directory to save files.
    """
    fraction = float(params["fraction"])
    abs_max = int(params["abs_max"])
    logger.info("preparing input (FASTA)")
    pred_fa = os.path.join(outdir, "prediction.fa")
    val_fa = os.path.join(outdir, "validation.fa")
    loc_fa = os.path.join(outdir, "localization.fa")
    # Split inputfile in prediction and validation set
    logger.debug(
        "Splitting %s into prediction set (%s) and validation set (%s)",
        inputfile, pred_fa, val_fa)
    divide_fa_file(inputfile, pred_fa, val_fa, fraction, abs_max)
    # File for location plots
    shutil.copy(val_fa, loc_fa)
    seqs = Fasta(loc_fa).seqs
    lwidth = len(seqs[0])
    # Positional preference plots assume sequences of equal length
    if not all(len(seq) == lwidth for seq in seqs):
        logger.warning(
            "PLEASE NOTE: FASTA file contains sequences of different lengths. "
            "Positional preference plots might be incorrect!")
|
Create all the FASTA files for de novo motif prediction and validation.
Parameters
----------
|
entailment
|
def create_background(bg_type, fafile, outfile, genome="hg18", width=200, nr_times=10, custom_background=None):
    """Create background of a specific type.

    Parameters
    ----------
    bg_type : str
        Name of background type: "random", "genomic", "gc", "promoter"
        or "custom".
    fafile : str
        Name of input FASTA file.
    outfile : str
        Name of output FASTA file.
    genome : str, optional
        Genome name.
    width : int, optional
        Size of regions.
    nr_times : int, optional
        Generate this times as many background sequences as compared to
        input file.
    custom_background : str, optional
        FASTA file to use when bg_type is "custom".

    Returns
    -------
    nr_seqs : int
        Number of sequences created.
    """
    width = int(width)
    config = MotifConfig()
    fg = Fasta(fafile)
    if bg_type in ["genomic", "gc"]:
        if not genome:
            logger.error("Need a genome to create background")
            sys.exit(1)
    if bg_type == "random":
        # first-order Markov model trained on the foreground sequences
        f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
        logger.debug("Random background: %s", outfile)
    elif bg_type == "genomic":
        logger.debug("Creating genomic background")
        f = RandomGenomicFasta(genome, width, nr_times * len(fg))
    elif bg_type == "gc":
        logger.debug("Creating GC matched background")
        f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
        logger.debug("GC matched background: %s", outfile)
    elif bg_type == "promoter":
        fname = Genome(genome).filename
        gene_file = fname.replace(".fa", ".annotation.bed.gz")
        # BUGFIX: the fallback previously tested `if not gene_file:`, which
        # can never be true (str.replace always returns a string); test for
        # file existence instead so the configured gene dir is actually used
        if not os.path.exists(gene_file):
            gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
        if not os.path.exists(gene_file):
            # BUGFIX: the genome name was never interpolated into this message
            print("Could not find a gene file for genome {}".format(genome))
            print("Did you use the --annotation flag for genomepy?")
            print("Alternatively make sure there is a file called {}.bed in {}".format(genome, config.get_gene_dir()))
            raise ValueError()
        logger.info(
            "Creating random promoter background (%s, using genes in %s)",
            genome, gene_file)
        f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
        logger.debug("Random promoter background: %s", outfile)
    elif bg_type == "custom":
        bg_file = custom_background
        if not bg_file:
            raise IOError(
                "Background file not specified!")
        if not os.path.exists(bg_file):
            # BUGFIX: the filename was passed as a constructor arg and never
            # interpolated into the message
            raise IOError(
                "Custom background file {} does not exist!".format(bg_file))
        else:
            logger.info("Copying custom background file %s to %s.",
                        bg_file, outfile)
            f = Fasta(bg_file)
            median_len = np.median([len(seq) for seq in f.seqs])
            # warn when the custom sequences differ >5% in length from the
            # width GimmeMotifs uses, since that skews the statistics
            if median_len < (width * 0.95) or median_len > (width * 1.05):
                logger.warning(
                    "The custom background file %s contains sequences with a "
                    "median length of %s, while GimmeMotifs predicts motifs in sequences "
                    "of length %s. This will influence the statistics! It is recommended "
                    "to use background sequences of the same length.",
                    bg_file, median_len, width)
    f.writefasta(outfile)
    return len(f)
|
Create background of a specific type.
Parameters
----------
bg_type : str
Name of background type.
fafile : str
Name of input FASTA file.
outfile : str
Name of output FASTA file.
genome : str, optional
Genome name.
width : int, optional
Size of regions.
nr_times : int, optional
Generate this times as many background sequences as compared to
input file.
Returns
-------
nr_seqs : int
Number of sequences created.
|
entailment
|
def create_backgrounds(outdir, background=None, genome="hg38", width=200, custom_background=None):
    """Create different backgrounds for motif prediction and validation.

    Parameters
    ----------
    outdir : str
        Directory to save results.
    background : list, optional
        Background types to create, default is 'random'.
    genome : str, optional
        Genome name (for genomic and gc backgrounds).
    width : int, optional
        Size of background regions.
    custom_background : str, optional
        FASTA file with a custom background (for the "custom" type).

    Returns
    -------
    bg_info : dict
        Keys: background name, values: file name.
    """
    if background is None:
        background = ["random"]
    # Create background for motif prediction; prefer the GC-matched
    # background when it is among the requested types
    if "gc" in background:
        pred_bg = "gc"
    else:
        pred_bg = background[0]
    create_background(
        pred_bg,
        os.path.join(outdir, "prediction.fa"),
        os.path.join(outdir, "prediction.bg.fa"),
        genome=genome,
        width=width,
        custom_background=custom_background)
    # Get background fasta files for statistics
    # (removed a duplicate, dead `nr_sequences = {}` initialization)
    bg_info = {}
    nr_sequences = {}
    for bg in background:
        fname = os.path.join(outdir, "bg.{}.fa".format(bg))
        nr_sequences[bg] = create_background(
            bg,
            os.path.join(outdir, "validation.fa"),
            fname,
            genome=genome,
            width=width,
            custom_background=custom_background)
        bg_info[bg] = fname
    return bg_info
|
Create different backgrounds for motif prediction and validation.
Parameters
----------
outdir : str
Directory to save results.
background : list, optional
Background types to create, default is 'random'.
genome : str, optional
Genome name (for genomic and gc backgrounds).
width : int, optional
Size of background regions
Returns
-------
bg_info : dict
Keys: background name, values: file name.
|
entailment
|
def _is_significant(stats, metrics=None):
"""Filter significant motifs based on several statistics.
Parameters
----------
stats : dict
Statistics disctionary object.
metrics : sequence
Metric with associated minimum values. The default is
(("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55))
Returns
-------
significant : bool
"""
if metrics is None:
metrics = (("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55))
for stat_name, min_value in metrics:
if stats.get(stat_name, 0) < min_value:
return False
return True
|
Filter significant motifs based on several statistics.
Parameters
----------
stats : dict
Statistics dictionary object.
metrics : sequence
Metric with associated minimum values. The default is
(("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55))
Returns
-------
significant : bool
|
entailment
|
def filter_significant_motifs(fname, result, bg, metrics=None):
    """Filter significant motifs based on several statistics.

    Significant motifs are written to `fname` in PFM format and
    returned as a list.

    Parameters
    ----------
    fname : str
        Filename of output file where significant motifs will be saved.
    result : PredictionResult instance
        Contains motifs and associated statistics.
    bg : str
        Name of background type to use.
    metrics : sequence
        Metric with associated minimum values. The default is
        (("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55))

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    sig_motifs = []
    with open(fname, "w") as f:
        for motif in result.motifs:
            # stats are keyed by "<motif id>_<consensus>", then background name
            stats = result.stats.get(
                "%s_%s" % (motif.id, motif.to_consensus()), {}).get(bg, {}
            )
            if _is_significant(stats, metrics):
                f.write("%s\n" % motif.to_pfm())
                sig_motifs.append(motif)
    logger.info("%s motifs are significant", len(sig_motifs))
    logger.debug("written to %s", fname)
    return sig_motifs
|
Filter significant motifs based on several statistics.
Parameters
----------
fname : str
Filename of output file were significant motifs will be saved.
result : PredictionResult instance
Contains motifs and associated statistics.
bg : str
Name of background type to use.
metrics : sequence
Metric with associated minimum values. The default is
(("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55))
Returns
-------
motifs : list
List of Motif instances.
|
entailment
|
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background, stats=None, metrics=("roc_auc", "recall_at_fdr")):
    """Return the best motif per cluster for a clustering results.

    The motif can be either the average motif or one of the clustered motifs.

    Parameters
    ----------
    single_pwm : str
        Filename of motifs.
    clus_pwm : str
        Filename of motifs.
    clusters :
        Motif clustering result.
    fg_fa : str
        Filename of FASTA file.
    background : dict
        Dictionary for background file names.
    stats : dict, optional
        If statistics are not supplied they will be computed.
    metrics : sequence, optional
        Metrics to use for motif evaluation. Default are "roc_auc" and
        "recall_at_fdr".

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    # BUGFIX: the documented default stats=None previously raised a
    # TypeError on `str(motif) not in stats`; start from an empty dict so
    # all statistics get computed
    if stats is None:
        stats = {}
    # combine original and clustered motifs
    motifs = read_motifs(single_pwm) + read_motifs(clus_pwm)
    motifs = dict([(str(m), m) for m in motifs])
    # get the statistics for those motifs that were not yet checked
    clustered_motifs = []
    for clus, singles in clusters:
        for motif in set([clus] + singles):
            if str(motif) not in stats:
                clustered_motifs.append(motifs[str(motif)])
    new_stats = {}
    for bg, bg_fa in background.items():
        for m, s in calc_stats(clustered_motifs, fg_fa, bg_fa).items():
            if m not in new_stats:
                new_stats[m] = {}
            new_stats[m][bg] = s
    stats.update(new_stats)
    # rank the motifs
    rank = rank_motifs(stats, metrics)
    # per cluster, pick the highest-ranked of the average motif and its members
    best_motifs = []
    for clus, singles in clusters:
        if len(singles) > 1:
            eval_motifs = singles
            # NOTE(review): `motifs` keys are strings while `clus` is a Motif;
            # this membership test may always be True — possibly should be
            # `str(clus) not in motifs`, confirm before changing
            if clus not in motifs:
                eval_motifs.append(clus)
            eval_motifs = [motifs[str(e)] for e in eval_motifs]
            best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1]
            best_motifs.append(best_motif)
        else:
            best_motifs.append(clus)
        for bg in background:
            stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles)
    best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True)
    return best_motifs
|
Return the best motif per cluster for a clustering results.
The motif can be either the average motif or one of the clustered motifs.
Parameters
----------
single_pwm : str
Filename of motifs.
clus_pwm : str
Filename of motifs.
clusters :
Motif clustering result.
fg_fa : str
Filename of FASTA file.
background : dict
Dictionary for background file names.
stats : dict, optional
If statistics are not supplied they will be computed.
metrics : sequence, optional
Metrics to use for motif evaluation. Default are "roc_auc" and
"recall_at_fdr".
Returns
-------
motifs : list
List of Motif instances.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.