| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class: entailment) |
|---|---|---|
def saveState(self, stateObj):
"""Utility methos to save plugin state stored in stateObj to persistent
storage to permit access to previous state in subsequent plugin runs.
Any object that can be pickled and unpickled can be used to store the
plugin state.
@param stateObj: Object that stores plugin state.
"""
        try:
            fp = open(self._stateFile, 'w')
            pickle.dump(stateObj, fp)
            fp.close()
        except:
            raise IOError("Failure in storing plugin state in file: %s"
                          % self._stateFile)
return True
|
Utility method to save plugin state stored in stateObj to persistent
storage to permit access to previous state in subsequent plugin runs.
Any object that can be pickled and unpickled can be used to store the
plugin state.
@param stateObj: Object that stores plugin state.
|
entailment
|
def restoreState(self):
"""Utility method to restore plugin state from persistent storage to
permit access to previous plugin state.
@return: Object that stores plugin state.
"""
if os.path.exists(self._stateFile):
            try:
                fp = open(self._stateFile, 'r')
                stateObj = pickle.load(fp)
                fp.close()
            except:
                raise IOError("Failure in reading plugin state from file: %s"
                              % self._stateFile)
return stateObj
return None
|
Utility method to restore plugin state from persistent storage to
permit access to previous plugin state.
@return: Object that stores plugin state.
|
entailment
|
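A standalone sketch of the same pickle round trip that saveState() and restoreState() implement; the state.pkl path is a hypothetical stand-in for self._stateFile, and binary file modes are used as pickle generally expects:

import os
import pickle

state_file = 'state.pkl'  # hypothetical stand-in for self._stateFile
state = {'last_run': 1234567890, 'counters': {'rx': 42}}  # any picklable object

# Save the state object to persistent storage.
with open(state_file, 'wb') as fp:
    pickle.dump(state, fp)

# Restore it on a subsequent run, or fall back to None when no state exists.
if os.path.exists(state_file):
    with open(state_file, 'rb') as fp:
        restored = pickle.load(fp)
else:
    restored = None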
def appendGraph(self, graph_name, graph):
"""Utility method to associate Graph Object to Plugin.
This utility method is for use in constructor of child classes for
associating a MuninGraph instances to the plugin.
@param graph_name: Graph Name
@param graph: MuninGraph Instance
"""
self._graphDict[graph_name] = graph
self._graphNames.append(graph_name)
if not self.isMultigraph and len(self._graphNames) > 1:
raise AttributeError("Simple Munin Plugins cannot have more than one graph.")
|
Utility method to associate a Graph Object with the Plugin.
This utility method is for use in the constructor of child classes for
associating MuninGraph instances with the plugin.
@param graph_name: Graph Name
@param graph: MuninGraph Instance
|
entailment
|
def appendSubgraph(self, parent_name, graph_name, graph):
"""Utility method to associate Subgraph Instance to Root Graph Instance.
This utility method is for use in constructor of child classes for
associating a MuninGraph Subgraph instance with a Root Graph instance.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@param graph: MuninGraph Instance
"""
if not self.isMultigraph:
raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
if self._graphDict.has_key(parent_name):
if not self._subgraphDict.has_key(parent_name):
self._subgraphDict[parent_name] = {}
self._subgraphNames[parent_name] = []
self._subgraphDict[parent_name][graph_name] = graph
self._subgraphNames[parent_name].append(graph_name)
else:
raise AttributeError("Invalid parent graph name %s used for subgraph %s."
% (parent_name, graph_name))
|
Utility method to associate a Subgraph Instance with a Root Graph Instance.
This utility method is for use in the constructor of child classes for
associating a MuninGraph Subgraph instance with a Root Graph instance.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@param graph: MuninGraph Instance
|
entailment
|
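A minimal sketch of how a child class might register a root graph and a subgraph from its constructor; the MuninGraph constructor arguments shown are assumptions for illustration, not a confirmed signature:

class MyPlugin(MuninPlugin):  # hypothetical child class
    isMultigraph = True

    def __init__(self, argv=(), env=None, debug=False):
        MuninPlugin.__init__(self, argv, env, debug)
        # Assumed MuninGraph(title, category) arguments for illustration.
        root = MuninGraph('Network Traffic', 'network')
        self.appendGraph('net_traffic', root)
        sub = MuninGraph('eth0 Traffic', 'network')
        self.appendSubgraph('net_traffic', 'eth0', sub)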
def setGraphVal(self, graph_name, field_name, val):
"""Utility method to set Value for Field in Graph.
        This utility method is for use in the retrieveVals() method of child classes.
@param graph_name: Graph Name
@param field_name: Field Name.
@param val: Value for field.
"""
graph = self._getGraph(graph_name, True)
if graph.hasField(field_name):
graph.setVal(field_name, val)
else:
raise AttributeError("Invalid field name %s for graph %s."
% (field_name, graph_name))
|
Utility method to set Value for Field in Graph.
This utility method is for use in the retrieveVals() method of child classes.
@param graph_name: Graph Name
@param field_name: Field Name.
@param val: Value for field.
|
entailment
|
def setSubgraphVal(self, parent_name, graph_name, field_name, val):
"""Set Value for Field in Subgraph.
        This utility method is for use in the retrieveVals() method of child
        classes.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@param field_name: Field Name.
@param val: Value for field.
"""
subgraph = self._getSubGraph(parent_name, graph_name, True)
if subgraph.hasField(field_name):
subgraph.setVal(field_name, val)
else:
raise AttributeError("Invalid field name %s for subgraph %s "
"of parent graph %s."
% (field_name, graph_name, parent_name))
|
Set Value for Field in Subgraph.
This utility method is for use in the retrieveVals() method of child
classes.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@param field_name: Field Name.
@param val: Value for field.
|
entailment
|
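Continuing that sketch, a child class's retrieveVals() pushes measured values through these setters; the numbers below are hypothetical:

    def retrieveVals(self):
        stats = {'rx': 1024, 'tx': 2048}  # hypothetical measurements
        self.setGraphVal('net_traffic', 'rx', stats['rx'])
        self.setSubgraphVal('net_traffic', 'eth0', 'rx', stats['rx'])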
def getSubgraphList(self, parent_name):
"""Returns list of names of subgraphs for Root Graph with name parent_name.
@param parent_name: Name of Root Graph.
@return: List of subgraph names.
"""
if not self.isMultigraph:
raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
if self._graphDict.has_key(parent_name):
            return self._subgraphNames.get(parent_name) or []
else:
raise AttributeError("Invalid parent graph name %s."
% (parent_name,))
|
Returns list of names of subgraphs for Root Graph with name parent_name.
@param parent_name: Name of Root Graph.
@return: List of subgraph names.
|
entailment
|
def graphHasField(self, graph_name, field_name):
"""Return true if graph with name graph_name has field with
name field_name.
@param graph_name: Graph Name
@param field_name: Field Name.
@return: Boolean
"""
        graph = self._getGraph(graph_name, True)
return graph.hasField(field_name)
|
Return true if graph with name graph_name has field with
name field_name.
@param graph_name: Graph Name
@param field_name: Field Name.
@return: Boolean
|
entailment
|
def subGraphHasField(self, parent_name, graph_name, field_name):
"""Return true if subgraph with name graph_name with parent graph with
name parent_name has field with name field_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@param field_name: Field Name.
@return: Boolean
"""
subgraph = self._getSubGraph(parent_name, graph_name, True)
return subgraph.hasField(field_name)
|
Return true if subgraph with name graph_name with parent graph with
name parent_name has field with name field_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@param field_name: Field Name.
@return: Boolean
|
entailment
|
def getGraphFieldList(self, graph_name):
"""Returns list of names of fields for graph with name graph_name.
@param graph_name: Graph Name
@return: List of field names for graph.
"""
graph = self._getGraph(graph_name, True)
return graph.getFieldList()
|
Returns list of names of fields for graph with name graph_name.
@param graph_name: Graph Name
@return: List of field names for graph.
|
entailment
|
def getGraphFieldCount(self, graph_name):
"""Returns number of fields for graph with name graph_name.
@param graph_name: Graph Name
@return: Number of fields for graph.
"""
graph = self._getGraph(graph_name, True)
return graph.getFieldCount()
|
Returns number of fields for graph with name graph_name.
@param graph_name: Graph Name
@return: Number of fields for graph.
|
entailment
|
def getSubgraphFieldList(self, parent_name, graph_name):
"""Returns list of names of fields for subgraph with name graph_name
and parent graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: List of field names for subgraph.
"""
graph = self._getSubGraph(parent_name, graph_name, True)
return graph.getFieldList()
|
Returns list of names of fields for subgraph with name graph_name
and parent graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: List of field names for subgraph.
|
entailment
|
def getSubgraphFieldCount(self, parent_name, graph_name):
"""Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph.
"""
graph = self._getSubGraph(parent_name, graph_name, True)
return graph.getFieldCount()
|
Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph.
|
entailment
|
def config(self):
"""Implements Munin Plugin Graph Configuration.
Prints out configuration for graphs.
        Use as is. Not required to be overridden in child classes. The plugin
will work correctly as long as the Munin Graph objects have been
populated.
"""
for parent_name in self._graphNames:
graph = self._graphDict[parent_name]
if self.isMultigraph:
print "multigraph %s" % self._getMultigraphID(parent_name)
print self._formatConfig(graph.getConfig())
print
if (self.isMultigraph and self._nestedGraphs
and self._subgraphDict and self._subgraphNames):
for (parent_name, subgraph_names) in self._subgraphNames.iteritems():
for graph_name in subgraph_names:
graph = self._subgraphDict[parent_name][graph_name]
print "multigraph %s" % self.getMultigraphID(parent_name,
graph_name)
print self._formatConfig(graph.getConfig())
print
return True
|
Implements Munin Plugin Graph Configuration.
Prints out configuration for graphs.
Use as is. Not required to be overridden in child classes. The plugin
will work correctly as long as the Munin Graph objects have been
populated.
|
entailment
|
def fetch(self):
"""Implements Munin Plugin Fetch Option.
Prints out measured values.
"""
self.retrieveVals()
for parent_name in self._graphNames:
graph = self._graphDict[parent_name]
if self.isMultigraph:
print "multigraph %s" % self._getMultigraphID(parent_name)
print self._formatVals(graph.getVals())
print
if (self.isMultigraph and self._nestedGraphs
and self._subgraphDict and self._subgraphNames):
for (parent_name, subgraph_names) in self._subgraphNames.iteritems():
for graph_name in subgraph_names:
graph = self._subgraphDict[parent_name][graph_name]
print "multigraph %s" % self.getMultigraphID(parent_name,
graph_name)
print self._formatVals(graph.getVals())
print
return True
|
Implements Munin Plugin Fetch Option.
Prints out measured values.
|
entailment
|
def run(self):
"""Implements main entry point for plugin execution."""
if len(self._argv) > 1 and len(self._argv[1]) > 0:
oper = self._argv[1]
else:
oper = 'fetch'
if oper == 'fetch':
ret = self.fetch()
elif oper == 'config':
ret = self.config()
if ret and self._dirtyConfig:
ret = self.fetch()
elif oper == 'autoconf':
ret = self.autoconf()
if ret:
print "yes"
else:
print "no"
ret = True
elif oper == 'suggest':
ret = self.suggest()
else:
raise AttributeError("Invalid command argument: %s" % oper)
return ret
|
Implements main entry point for plugin execution.
|
entailment
|
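Munin calls the plugin executable with an optional first argument (config, autoconf or suggest; no argument means fetch), and run() dispatches accordingly. A typical entry-point stub, reusing the hypothetical MyPlugin class sketched earlier:

import os
import sys

if __name__ == '__main__':
    plugin = MyPlugin(sys.argv, os.environ)
    plugin.run()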
def addField(self, name, label, type=None, draw=None, info=None, #@ReservedAssignment
extinfo=None, colour=None, negative=None, graph=None,
min=None, max=None, cdef=None, line=None, #@ReservedAssignment
warning=None, critical=None):
"""Add field to Munin Graph
@param name: Field Name
@param label: Field Label
@param type: Stat Type:
'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE'
@param draw: Graph Type:
'AREA' / 'LINE{1,2,3}' /
'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK'
@param info: Detailed Field Info
@param extinfo: Extended Field Info
@param colour: Field Colour
@param negative: Mirror Value
@param graph: Draw on Graph - True / False (Default: True)
@param min: Minimum Valid Value
@param max: Maximum Valid Value
@param cdef: CDEF
@param line: Adds horizontal line at value defined for field.
@param warning: Warning Value
@param critical: Critical Value
"""
if self._autoFixNames:
name = self._fixName(name)
if negative is not None:
negative = self._fixName(negative)
self._fieldAttrDict[name] = dict(((k,v) for (k,v) in locals().iteritems()
if (v is not None
and k not in ('self',))))
self._fieldNameList.append(name)
|
Add field to Munin Graph
@param name: Field Name
@param label: Field Label
@param type: Stat Type:
'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE'
@param draw: Graph Type:
'AREA' / 'LINE{1,2,3}' /
'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK'
@param info: Detailed Field Info
@param extinfo: Extended Field Info
@param colour: Field Colour
@param negative: Mirror Value
@param graph: Draw on Graph - True / False (Default: True)
@param min: Minimum Valid Value
@param max: Maximum Valid Value
@param cdef: CDEF
@param line: Adds horizontal line at value defined for field.
@param warning: Warning Value
@param critical: Critical Value
|
entailment
|
def hasField(self, name):
"""Returns true if field with field_name exists.
@param name: Field Name
@return: Boolean
"""
if self._autoFixNames:
name = self._fixName(name)
return self._fieldAttrDict.has_key(name)
|
Returns true if a field with the given name exists.
@param name: Field Name
@return: Boolean
|
entailment
|
def getConfig(self):
"""Returns dictionary of config entries for Munin Graph.
@return: Dictionary of config entries.
"""
return {'graph': self._graphAttrDict,
'fields': [(field_name, self._fieldAttrDict.get(field_name))
for field_name in self._fieldNameList]}
|
Returns dictionary of config entries for Munin Graph.
@return: Dictionary of config entries.
|
entailment
|
def setVal(self, name, val):
"""Set value for field in graph.
        @param name: Field Name
        @param val: Value for field.
"""
if self._autoFixNames:
name = self._fixName(name)
self._fieldValDict[name] = val
|
Set value for field in graph.
@param name: Field Name
@param val: Value for field.
|
entailment
|
def getVals(self):
"""Returns value list for Munin Graph
@return: List of name-value pairs.
"""
return [(name, self._fieldValDict.get(name))
for name in self._fieldNameList]
|
Returns value list for Munin Graph
@return: List of name-value pairs.
|
entailment
|
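An end-to-end sketch of the field API above, reusing the assumed MuninGraph(title, category) constructor from the earlier sketch:

graph = MuninGraph('Network Traffic', 'network')  # assumed constructor arguments
graph.addField('rx', 'received', type='DERIVE', draw='AREA', min=0)
graph.addField('tx', 'transmitted', type='DERIVE', draw='LINE1', min=0)
graph.setVal('rx', 123456)
graph.setVal('tx', 654321)
print(graph.getConfig())  # {'graph': {...}, 'fields': [('rx', {...}), ('tx', {...})]}
print(graph.getVals())    # [('rx', 123456), ('tx', 654321)]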
def initStats(self, extras=None):
"""Query and parse Web Server Status Page.
@param extras: Include extra metrics, which can be computationally more
expensive.
"""
if extras is not None:
self._extras = extras
if self._extras:
detail = 1
else:
detail = 0
url = "%s://%s:%d/%s?detail=%s" % (self._proto, self._host, self._port,
self._monpath, detail)
response = util.get_url(url, self._user, self._password)
self._statusDict = {}
for line in response.splitlines():
cols = line.split(':')
if not self._statusDict.has_key(cols[0]):
self._statusDict[cols[0]] = {}
self._statusDict[cols[0]][cols[1]] = util.parse_value(cols[2])
|
Query and parse Web Server Status Page.
@param extras: Include extra metrics, which can be computationally more
expensive.
|
entailment
|
def initStats(self):
"""Query and parse Nginx Web Server Status Page."""
url = "%s://%s:%d/%s" % (self._proto, self._host, self._port,
self._statuspath)
response = util.get_url(url, self._user, self._password)
self._statusDict = {}
for line in response.splitlines():
mobj = re.match('\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line)
if mobj:
idx = 0
for key in ('accepts','handled','requests'):
idx += 1
self._statusDict[key] = util.parse_value(mobj.group(idx))
else:
for (key,val) in re.findall('(\w+):\s*(\d+)', line):
self._statusDict[key.lower()] = util.parse_value(val)
|
Query and parse Nginx Web Server Status Page.
|
entailment
|
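For reference, the two regular expressions above target the nginx stub_status page, which looks roughly like this (illustrative counter values):

Active connections: 291
server accepts handled requests
 16630948 16630948 31070465
Reading: 6 Writing: 179 Waiting: 106

The first pattern captures the bare accepts/handled/requests counter line; the second captures the key: value pairs on the remaining lines.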
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
|
Read a file into a string
|
entailment
|
def retrieveVals(self):
"""Retrieve values for graphs."""
proc_info = ProcessInfo()
stats = {}
for (prefix, is_thread) in (('proc', False),
('thread', True)):
graph_name = '%s_status' % prefix
if self.hasGraph(graph_name):
if not stats.has_key(prefix):
stats[prefix] = proc_info.getProcStatStatus(is_thread)
for (fname, stat_key) in (
('unint_sleep', 'uninterruptable_sleep'),
('stopped', 'stopped'),
('defunct', 'defunct'),
('running', 'running'),
('sleep', 'sleep')):
self.setGraphVal(graph_name, fname,
stats[prefix]['status'].get(stat_key))
graph_name = '%s_prio' % prefix
if self.hasGraph(graph_name):
if not stats.has_key(prefix):
stats[prefix] = proc_info.getProcStatStatus(is_thread)
for (fname, stat_key) in (
('high', 'high'),
('low', 'low'),
('norm', 'norm'),
('locked', 'locked_in_mem')):
self.setGraphVal(graph_name, fname,
stats[prefix]['prio'].get(stat_key))
|
Retrieve values for graphs.
|
entailment
|
def retrieveVals(self):
"""Retrieve values for graphs."""
file_stats = self._fileInfo.getContainerStats()
for contname in self._fileContList:
stats = file_stats.get(contname)
if stats is not None:
if self.hasGraph('rackspace_cloudfiles_container_size'):
self.setGraphVal('rackspace_cloudfiles_container_size', contname,
stats.get('size'))
if self.hasGraph('rackspace_cloudfiles_container_count'):
self.setGraphVal('rackspace_cloudfiles_container_count', contname,
stats.get('count'))
|
Retrieve values for graphs.
|
entailment
|
def plot(results, subjgroup=None, subjname='Subject Group', listgroup=None,
listname='List', subjconds=None, listconds=None, plot_type=None,
plot_style=None, title=None, legend=True, xlim=None, ylim=None,
save_path=None, show=True, ax=None, **kwargs):
"""
General plot function that groups data by subject/list number and performs analysis.
Parameters
----------
results : quail.FriedEgg
Object containing results
subjgroup : list of strings or ints
String/int variables indicating how to group over subjects. Must be
the length of the number of subjects
subjname : string
Name of the subject grouping variable
listgroup : list of strings or ints
String/int variables indicating how to group over list. Must be
the length of the number of lists
listname : string
Name of the list grouping variable
subjconds : list
List of subject hues (str) to plot
listconds : list
List of list hues (str) to plot
plot_type : string
Specifies the type of plot. If list (default), the list groupings (listgroup)
will determine the plot grouping. If subject, the subject groupings
        (subjgroup) will determine the plot grouping. If split (currently just
works for accuracy plots), both listgroup and subjgroup will determine
the plot groupings
plot_style : string
Specifies the style of the plot. This currently works only for
accuracy and fingerprint plots. The plot style can be bar (default for
        accuracy plot), violin (default for fingerprint plots) or swarm.
title : string
The title of the plot
legend : bool
If true (default), a legend is plotted.
ylim : list of numbers
A ymin/max can be specified by a list of the form [ymin, ymax]
xlim : list of numbers
A xmin/max can be specified by a list of the form [xmin, xmax]
save_path : str
Path to save out figure. Include the file extension, e.g.
save_path='figure.pdf'
show : bool
If False, do not show figure, but still return ax handle (default True).
ax : Matplotlib.Axes object or None
A plot object to draw to. If None, a new one is created and returned.
Returns
----------
ax : matplotlib.Axes.Axis
An axis handle for the figure
"""
def plot_acc(data, plot_style, plot_type, listname, subjname, **kwargs):
        # set default style to bar
plot_style = plot_style if plot_style is not None else 'bar'
plot_type = plot_type if plot_type is not None else 'list'
        if plot_style == 'bar':
            plot_func = sns.barplot
        elif plot_style == 'swarm':
            plot_func = sns.swarmplot
        elif plot_style == 'violin':
            plot_func = sns.violinplot
        if plot_type == 'list':
            ax = plot_func(data=data, x=listname, y="Accuracy", **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x=subjname, y="Accuracy", **kwargs)
        elif plot_type == 'split':
            ax = plot_func(data=data, x=subjname, y="Accuracy", hue=listname, **kwargs)
return ax
def plot_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
# set default style to bar
plot_style = plot_style if plot_style is not None else 'bar'
plot_type = plot_type if plot_type is not None else 'list'
        if plot_style == 'bar':
            plot_func = sns.barplot
        elif plot_style == 'swarm':
            plot_func = sns.swarmplot
        elif plot_style == 'violin':
            plot_func = sns.violinplot
        if plot_type == 'list':
            ax = plot_func(data=data, x=listname, y="Temporal Clustering Score", **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", **kwargs)
        elif plot_type == 'split':
            ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", hue=listname, **kwargs)
return ax
def plot_fingerprint(data, plot_style, plot_type, listname, subjname, **kwargs):
# set default style to violin
plot_style = plot_style if plot_style is not None else 'violin'
plot_type = plot_type if plot_type is not None else 'list'
        if plot_style == 'bar':
            plot_func = sns.barplot
        elif plot_style == 'swarm':
            plot_func = sns.swarmplot
        elif plot_style == 'violin':
            plot_func = sns.violinplot
        if plot_type == 'list':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, **kwargs)
        else:
            ax = plot_func(data=data, x="Feature", y="Clustering Score", **kwargs)
return ax
def plot_fingerprint_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
# set default style to violin
plot_style = plot_style if plot_style is not None else 'violin'
plot_type = plot_type if plot_type is not None else 'list'
        if plot_style == 'bar':
            plot_func = sns.barplot
        elif plot_style == 'swarm':
            plot_func = sns.swarmplot
        elif plot_style == 'violin':
            plot_func = sns.violinplot
        order = list(data['Feature'].unique())
        if plot_type == 'list':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, order=order, **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, order=order, **kwargs)
        else:
            ax = plot_func(data=data, x="Feature", y="Clustering Score", order=order, **kwargs)
return ax
def plot_spc(data, plot_style, plot_type, listname, subjname, **kwargs):
plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'subject':
            ax = sns.lineplot(data=data, x="Position", y="Proportion Recalled", hue=subjname, **kwargs)
        elif plot_type == 'list':
            ax = sns.lineplot(data=data, x="Position", y="Proportion Recalled", hue=listname, **kwargs)
ax.set_xlim(0, data['Position'].max())
return ax
def plot_pnr(data, plot_style, plot_type, listname, subjname, position, list_length, **kwargs):
plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'subject':
            ax = sns.lineplot(data=data, x="Position", y='Probability of Recall: Position ' + str(position), hue=subjname, **kwargs)
        elif plot_type == 'list':
            ax = sns.lineplot(data=data, x="Position", y='Probability of Recall: Position ' + str(position), hue=listname, **kwargs)
ax.set_xlim(0,list_length-1)
return ax
def plot_lagcrp(data, plot_style, plot_type, listname, subjname, **kwargs):
plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'subject':
ax = sns.lineplot(data=data[data['Position']<0], x="Position", y="Conditional Response Probability", hue=subjname, **kwargs)
if 'ax' in kwargs:
del kwargs['ax']
sns.lineplot(data=data[data['Position']>0], x="Position", y="Conditional Response Probability", hue=subjname, ax=ax, legend=False, **kwargs)
        elif plot_type == 'list':
ax = sns.lineplot(data=data[data['Position']<0], x="Position", y="Conditional Response Probability", hue=listname, **kwargs)
if 'ax' in kwargs:
del kwargs['ax']
sns.lineplot(data=data[data['Position']>0], x="Position", y="Conditional Response Probability", hue=listname, ax=ax, legend=False, **kwargs)
ax.set_xlim(-5,5)
return ax
# if no grouping, set default to iterate over each list independently
subjgroup = subjgroup if subjgroup is not None else results.data.index.levels[0].values
listgroup = listgroup if listgroup is not None else results.data.index.levels[1].values
if subjconds:
# make sure its a list
if type(subjconds) is not list:
subjconds=[subjconds]
# slice
idx = pd.IndexSlice
results.data = results.data.sort_index()
results.data = results.data.loc[idx[subjconds, :],:]
# filter subjgroup
        subjgroup = list(filter(lambda x: x in subjconds, subjgroup))
if listconds:
# make sure its a list
if type(listconds) is not list:
listconds=[listconds]
# slice
idx = pd.IndexSlice
results.data = results.data.sort_index()
results.data = results.data.loc[idx[:, listconds],:]
    # convert to tidy format for plotting
tidy_data = format2tidy(results.data, subjname, listname, subjgroup, analysis=results.analysis, position=results.position)
    if ax is not None:
        kwargs['ax'] = ax
#plot!
if results.analysis=='accuracy':
ax = plot_acc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='temporal':
ax = plot_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='fingerprint':
ax = plot_fingerprint(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='fingerprint_temporal':
ax = plot_fingerprint_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='spc':
ax = plot_spc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='pfr' or results.analysis=='pnr':
ax = plot_pnr(tidy_data, plot_style, plot_type, listname, subjname, position=results.position, list_length=results.list_length, **kwargs)
elif results.analysis=='lagcrp':
ax = plot_lagcrp(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
else:
raise ValueError("Did not recognize analysis.")
# add title
if title:
plt.title(title)
if legend is False:
try:
ax.legend_.remove()
except:
pass
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
if save_path:
mpl.rcParams['pdf.fonttype'] = 42
plt.savefig(save_path)
return ax
|
General plot function that groups data by subject/list number and performs analysis.
Parameters
----------
results : quail.FriedEgg
Object containing results
subjgroup : list of strings or ints
String/int variables indicating how to group over subjects. Must be
the length of the number of subjects
subjname : string
Name of the subject grouping variable
listgroup : list of strings or ints
String/int variables indicating how to group over list. Must be
the length of the number of lists
listname : string
Name of the list grouping variable
subjconds : list
List of subject hues (str) to plot
listconds : list
List of list hues (str) to plot
plot_type : string
Specifies the type of plot. If list (default), the list groupings (listgroup)
will determine the plot grouping. If subject, the subject groupings
(subjgroup) will determine the plot grouping. If split (currently just
works for accuracy plots), both listgroup and subjgroup will determine
the plot groupings
plot_style : string
Specifies the style of the plot. This currently works only for
accuracy and fingerprint plots. The plot style can be bar (default for
accuracy plot), violin (default for fingerprint plots) or swarm.
title : string
The title of the plot
legend : bool
If true (default), a legend is plotted.
ylim : list of numbers
A ymin/max can be specified by a list of the form [ymin, ymax]
xlim : list of numbers
A xmin/max can be specified by a list of the form [xmin, xmax]
save_path : str
Path to save out figure. Include the file extension, e.g.
save_path='figure.pdf'
show : bool
If False, do not show figure, but still return ax handle (default True).
ax : Matplotlib.Axes object or None
A plot object to draw to. If None, a new one is created and returned.
Returns
----------
ax : matplotlib.Axes.Axis
An axis handle for the figure
|
entailment
|
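A hedged usage sketch of plot(); the analyze() call and its arguments are assumptions based on the quail.FriedEgg type named in the docstring:

import quail

egg = quail.load('example')                     # bundled example data
fegg = quail.analyze(egg, analysis='accuracy')  # assumed API returning a FriedEgg
ax = quail.plot(fegg, plot_type='list', plot_style='bar',
                title='Recall accuracy by list')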
def getIfStats(self):
"""Return dictionary of Traffic Stats for Network Interfaces.
@return: Nested dictionary of statistics for each interface.
"""
info_dict = {}
try:
fp = open(ifaceStatsFile, 'r')
data = fp.read()
fp.close()
except:
raise IOError('Failed reading interface stats from file: %s'
% ifaceStatsFile)
for line in data.splitlines():
mobj = re.match('^\s*([\w\d:]+):\s*(.*\S)\s*$', line)
if mobj:
iface = mobj.group(1)
statline = mobj.group(2)
info_dict[iface] = dict(zip(
('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo',
'rxframe', 'rxcompressed', 'rxmulticast',
'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo',
'txcolls', 'txcarrier', 'txcompressed'),
[int(x) for x in statline.split()]))
return info_dict
|
Return dictionary of Traffic Stats for Network Interfaces.
@return: Nested dictionary of statistics for each interface.
|
entailment
|
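The regular expression above parses the Linux /proc/net/dev statistics table, whose data lines look roughly like this (illustrative counters):

  eth0: 448874542  527649    0    0    0     0          0      1572 351129841  437267    0    0    0     0       0          0

The interface name before the colon becomes the dictionary key, and the sixteen counters map positionally onto the rx*/tx* names in the zip() call.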
def getIfConfig(self):
"""Return dictionary of Interface Configuration (ifconfig).
@return: Dictionary of if configurations keyed by if name.
"""
conf = {}
try:
out = subprocess.Popen([ipCmd, "addr", "show"],
stdout=subprocess.PIPE).communicate()[0]
except:
raise Exception('Execution of command %s failed.' % ipCmd)
for line in out.splitlines():
mobj = re.match('^\d+: (\S+):\s+<(\S*)>\s+(\S.*\S)\s*$', line)
if mobj:
iface = mobj.group(1)
conf[iface] = {}
continue
mobj = re.match('^\s{4}link\/(.*\S)\s*$', line)
if mobj:
arr = mobj.group(1).split()
if len(arr) > 0:
conf[iface]['type'] = arr[0]
if len(arr) > 1:
conf[iface]['hwaddr'] = arr[1]
continue
mobj = re.match('^\s+(inet|inet6)\s+([\d\.\:A-Za-z]+)\/(\d+)($|\s+.*\S)\s*$', line)
if mobj:
proto = mobj.group(1)
if not conf[iface].has_key(proto):
conf[iface][proto] = []
addrinfo = {}
addrinfo['addr'] = mobj.group(2).lower()
addrinfo['mask'] = int(mobj.group(3))
arr = mobj.group(4).split()
if len(arr) > 0 and arr[0] == 'brd':
addrinfo['brd'] = arr[1]
conf[iface][proto].append(addrinfo)
continue
return conf
|
Return dictionary of Interface Configuration (ifconfig).
@return: Dictionary of if configurations keyed by if name.
|
entailment
|
def getRoutes(self):
"""Get routing table.
@return: List of routes.
"""
routes = []
try:
out = subprocess.Popen([routeCmd, "-n"],
stdout=subprocess.PIPE).communicate()[0]
except:
            raise Exception('Execution of command %s failed.' % routeCmd)
lines = out.splitlines()
if len(lines) > 1:
headers = [col.lower() for col in lines[1].split()]
for line in lines[2:]:
routes.append(dict(zip(headers, line.split())))
return routes
|
Get routing table.
@return: List of routes.
|
entailment
|
def execNetstatCmd(self, *args):
"""Execute ps command with positional params args and return result as
list of lines.
@param *args: Positional params for netstat command.
@return: List of output lines
"""
out = util.exec_command([netstatCmd,] + list(args))
return out.splitlines()
|
Execute netstat command with positional params args and return result as
a list of lines.
@param *args: Positional params for netstat command.
@return: List of output lines
|
entailment
|
def parseNetstatCmd(self, tcp=True, udp=True, ipv4=True, ipv6=True,
include_listen=True, only_listen=False,
show_users=False, show_procs=False,
resolve_hosts=False, resolve_ports=False,
resolve_users=True):
"""Execute netstat command and return result as a nested dictionary.
        @param tcp: Include TCP ports in output if True.
        @param udp: Include UDP ports in output if True.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param only_listen: Include only listening ports in output if True.
@param show_users: Show info on owning users for ports if True.
@param show_procs: Show info on PID and Program Name attached to
ports if True.
@param resolve_hosts: Resolve IP addresses into names if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param resolve_users: Resolve numeric user IDs to user names if True.
@return: List of headers and list of rows and columns.
"""
headers = ['proto', 'ipversion', 'recvq', 'sendq',
'localaddr', 'localport','foreignaddr', 'foreignport',
'state']
args = []
proto = []
if ipv4:
proto.append('inet')
if ipv6:
proto.append('inet6')
if len(proto) > 0:
args.append('-A')
args.append(','.join(proto))
if tcp:
args.append('-t')
if udp:
args.append('-u')
if only_listen:
args.append('-l')
elif include_listen:
args.append('-a')
regexp_str = ('(tcp|udp)(\d*)\s+(\d+)\s+(\d+)\s+'
'(\S+):(\w+)\s+(\S+):(\w+|\*)\s+(\w*)')
if show_users:
args.append('-e')
regexp_str += '\s+(\w+)\s+(\d+)'
headers.extend(['user', 'inode'])
if show_procs:
args.append('-p')
regexp_str += '\s+(\S+)'
headers.extend(['pid', 'prog'])
if not resolve_hosts:
args.append('--numeric-hosts')
if not resolve_ports:
args.append('--numeric-ports')
if not resolve_users:
args.append('--numeric-users')
lines = self.execNetstatCmd(*args)
stats = []
regexp = re.compile(regexp_str)
for line in lines[2:]:
mobj = regexp.match(line)
if mobj is not None:
stat = list(mobj.groups())
if stat[1] == '0':
stat[1] = '4'
if stat[8] == '':
stat[8] = None
if show_procs:
proc = stat.pop().split('/')
if len(proc) == 2:
stat.extend(proc)
else:
stat.extend([None, None])
stats.append(stat)
return {'headers': headers, 'stats': stats}
|
Execute netstat command and return result as a nested dictionary.
@param tcp: Include TCP ports in output if True.
@param udp: Include UDP ports in output if True.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param only_listen: Include only listening ports in output if True.
@param show_users: Show info on owning users for ports if True.
@param show_procs: Show info on PID and Program Name attached to
ports if True.
@param resolve_hosts: Resolve IP addresses into names if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param resolve_users: Resolve numeric user IDs to user names if True.
@return: List of headers and list of rows and columns.
|
entailment
|
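As a worked example, parseNetstatCmd(tcp=True, udp=False, ipv4=True, ipv6=False, include_listen=False, show_procs=True) assembles this argument list for execNetstatCmd():

args = ['-A', 'inet', '-t', '-p', '--numeric-hosts', '--numeric-ports']
# i.e. the equivalent of: netstat -A inet -t -p --numeric-hosts --numeric-ports

Each matching output row is then split into the header fields (proto, ipversion, recvq, sendq, localaddr, localport, foreignaddr, foreignport, state, plus pid and prog from the -p column).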
def getStats(self, tcp=True, udp=True, ipv4=True, ipv6=True,
include_listen=True, only_listen=False,
show_users=False, show_procs=False,
resolve_hosts=False, resolve_ports=False, resolve_users=True,
**kwargs):
"""Execute netstat command and return result as a nested dictionary.
        @param tcp: Include TCP ports in output if True.
        @param udp: Include UDP ports in output if True.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param only_listen: Include only listening ports in output if True.
@param show_users: Show info on owning users for ports if True.
@param show_procs: Show info on PID and Program Name attached to
ports if True.
@param resolve_hosts: Resolve IP addresses into names if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param resolve_users: Resolve numeric user IDs to user names if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: List of headers and list of rows and columns.
"""
pinfo = self.parseNetstatCmd(tcp, udp, ipv4, ipv6,
include_listen, only_listen,
show_users, show_procs,
resolve_hosts, resolve_ports, resolve_users)
if pinfo:
if len(kwargs) > 0:
pfilter = util.TableFilter()
pfilter.registerFilters(**kwargs)
stats = pfilter.applyFilters(pinfo['headers'], pinfo['stats'])
return {'headers': pinfo['headers'], 'stats': stats}
else:
return pinfo
else:
return None
|
Execute netstat command and return result as a nested dictionary.
@param tcp: Include TCP ports in output if True.
@param udp: Include UDP ports in output if True.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param only_listen: Include only listening ports in output if True.
@param show_users: Show info on owning users for ports if True.
@param show_procs: Show info on PID and Program Name attached to
ports if True.
@param resolve_hosts: Resolve IP addresses into names if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param resolve_users: Resolve numeric user IDs to user names if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: List of headers and list of rows and columns.
|
entailment
|
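A hedged example of the filter keywords: counting sshd-owned listening sockets might look like the following, where NetstatInfo is a hypothetical name for this wrapper class:

netstat_info = NetstatInfo()  # hypothetical class name
result = netstat_info.getStats(tcp=True, udp=False, only_listen=True,
                               show_procs=True, prog='sshd')

Here prog='sshd' filters on the prog column that show_procs adds, using the exact-match form of the filter suffixes listed above.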
def getTCPportConnStatus(self, ipv4=True, ipv6=True, include_listen=False,
**kwargs):
"""Returns the number of TCP endpoints discriminated by status.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping connection status to the
number of endpoints.
"""
status_dict = {}
result = self.getStats(tcp=True, udp=False,
include_listen=include_listen,
ipv4=ipv4, ipv6=ipv6,
**kwargs)
stats = result['stats']
for stat in stats:
if stat is not None:
status = stat[8].lower()
status_dict[status] = status_dict.get(status, 0) + 1
return status_dict
|
Returns the number of TCP endpoints discriminated by status.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param include_listen: Include listening ports in output if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping connection status to the
number of endpoints.
|
entailment
|
def getTCPportConnCount(self, ipv4=True, ipv6=True, resolve_ports=False,
**kwargs):
"""Returns TCP connection counts for each local port.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping port number or name to the
number of established connections.
"""
port_dict = {}
result = self.getStats(tcp=True, udp=False,
include_listen=False, ipv4=ipv4,
ipv6=ipv6, resolve_ports=resolve_ports,
**kwargs)
stats = result['stats']
for stat in stats:
if stat[8] == 'ESTABLISHED':
                port_dict[stat[5]] = port_dict.get(stat[5], 0) + 1
return port_dict
|
Returns TCP connection counts for each local port.
@param ipv4: Include IPv4 ports in output if True.
@param ipv6: Include IPv6 ports in output if True.
@param resolve_ports: Resolve numeric ports to names if True.
@param **kwargs: Keyword variables are used for filtering the
results depending on the values of the columns.
Each keyword must correspond to a field name with
an optional suffix:
field: Field equal to value or in list
of values.
field_ic: Field equal to value or in list of
values, using case insensitive
comparison.
field_regex: Field matches regex value or
matches with any regex in list of
values.
field_ic_regex: Field matches regex value or
matches with any regex in list of
values using case insensitive
match.
@return: Dictionary mapping port number or name to the
number of established connections.
|
entailment
|
def accuracy_helper(egg, match='exact', distance='euclidean',
features=None):
"""
Computes proportion of words recalled
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
prop_recalled : numpy array
proportion of words recalled
"""
def acc(lst):
return len([i for i in np.unique(lst) if i>=0])/(egg.list_length)
opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if match in ['exact', 'best']:
result = [acc(lst) for lst in recmat]
    elif match == 'smooth':
result = np.mean(recmat, axis=1)
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(result, axis=0)
|
Computes proportion of words recalled
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
prop_recalled : numpy array
proportion of words recalled
|
entailment
|
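A small worked example of the exact-match branch: if a four-item list is recalled as the items in presentation positions 2, 1 and 1 again (a repeat), acc() keeps the unique non-negative positions and returns 2/4 = 0.5. Sketch of that arithmetic:

import numpy as np

row = np.array([2, 1, 1])  # hypothetical recall-matrix row (1-based positions)
list_length = 4
acc = len([i for i in np.unique(row) if i >= 0]) / list_length
print(acc)  # 0.5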
def _connect(self):
"""Establish connection to PostgreSQL Database."""
if self._connParams:
self._conn = psycopg2.connect(**self._connParams)
else:
self._conn = psycopg2.connect('')
try:
ver_str = self._conn.get_parameter_status('server_version')
except AttributeError:
ver_str = self.getParam('server_version')
self._version = util.SoftwareVersion(ver_str)
|
Establish connection to PostgreSQL Database.
|
entailment
|
def _createStatsDict(self, headers, rows):
"""Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
"""
dbstats = {}
for row in rows:
dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
return dbstats
|
Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
|
entailment
|
def _createTotalsDict(self, headers, rows):
"""Utility method that returns totals for database statistics.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Dictionary of totals for each statistics column.
"""
totals = [sum(col) for col in zip(*rows)[1:]]
return dict(zip(headers[1:], totals))
|
Utility method that returns totals for database statistics.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Dictionary of totals for each statistics column.
|
entailment
|
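A quick illustration of both helpers on a toy query result (hypothetical headers and values):

headers = ('datname', 'xact_commit', 'xact_rollback')
rows = [('app', 120, 3), ('reports', 40, 1)]
# _createStatsDict(headers, rows) ->
#   {'app': {'xact_commit': 120, 'xact_rollback': 3},
#    'reports': {'xact_commit': 40, 'xact_rollback': 1}}
# _createTotalsDict(headers, rows) ->
#   {'xact_commit': 160, 'xact_rollback': 4}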
def _simpleQuery(self, query):
"""Executes simple query which returns a single column.
@param query: Query string.
@return: Query result string.
"""
cur = self._conn.cursor()
cur.execute(query)
row = cur.fetchone()
return util.parse_value(row[0])
|
Executes simple query which returns a single column.
@param query: Query string.
@return: Query result string.
|
entailment
|
def getParam(self, key):
"""Returns value of Run-time Database Parameter 'key'.
@param key: Run-time parameter name.
@return: Run-time parameter value.
"""
cur = self._conn.cursor()
cur.execute("SHOW %s" % key)
row = cur.fetchone()
return util.parse_value(row[0])
|
Returns value of Run-time Database Parameter 'key'.
@param key: Run-time parameter name.
@return: Run-time parameter value.
|
entailment
|
def getConnectionStats(self):
"""Returns dictionary with number of connections for each database.
@return: Dictionary of database connection statistics.
"""
cur = self._conn.cursor()
cur.execute("""SELECT datname,numbackends FROM pg_stat_database;""")
rows = cur.fetchall()
if rows:
return dict(rows)
else:
return {}
|
Returns dictionary with number of connections for each database.
@return: Dictionary of database connection statistics.
|
entailment
|
def getDatabaseStats(self):
"""Returns database block read, transaction and tuple stats for each
database.
@return: Nested dictionary of stats.
"""
headers = ('datname', 'numbackends', 'xact_commit', 'xact_rollback',
'blks_read', 'blks_hit', 'tup_returned', 'tup_fetched',
'tup_inserted', 'tup_updated', 'tup_deleted', 'disk_size')
cur = self._conn.cursor()
cur.execute("SELECT %s, pg_database_size(datname) FROM pg_stat_database;"
% ",".join(headers[:-1]))
rows = cur.fetchall()
dbstats = self._createStatsDict(headers, rows)
totals = self._createTotalsDict(headers, rows)
return {'databases': dbstats, 'totals': totals}
|
Returns database block read, transaction and tuple stats for each
database.
@return: Nested dictionary of stats.
|
entailment
|
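The result nests per-database counters under 'databases' and column sums under 'totals'; shape only, with hypothetical values and most keys elided:

{'databases': {'app': {'numbackends': 3, 'xact_commit': 120, ...,
                       'disk_size': 8192000}},
 'totals': {'numbackends': 3, 'xact_commit': 120, ...}}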
def getLockStatsMode(self):
"""Returns the number of active lock discriminated by lock mode.
@return: : Dictionary of stats.
"""
info_dict = {'all': dict(zip(self.lockModes, (0,) * len(self.lockModes))),
'wait': dict(zip(self.lockModes, (0,) * len(self.lockModes)))}
cur = self._conn.cursor()
cur.execute("SELECT TRIM(mode, 'Lock'), granted, COUNT(*) FROM pg_locks "
"GROUP BY TRIM(mode, 'Lock'), granted;")
rows = cur.fetchall()
for (mode, granted, cnt) in rows:
info_dict['all'][mode] += cnt
if not granted:
info_dict['wait'][mode] += cnt
return info_dict
|
Returns the number of active lock discriminated by lock mode.
@return: : Dictionary of stats.
|
entailment
|
def getLockStatsDB(self):
"""Returns the number of active lock discriminated by database.
@return: : Dictionary of stats.
"""
info_dict = {'all': {},
'wait': {}}
cur = self._conn.cursor()
cur.execute("SELECT d.datname, l.granted, COUNT(*) FROM pg_database d "
"JOIN pg_locks l ON d.oid=l.database "
"GROUP BY d.datname, l.granted;")
rows = cur.fetchall()
for (db, granted, cnt) in rows:
info_dict['all'][db] = info_dict['all'].get(db, 0) + cnt
if not granted:
info_dict['wait'][db] = info_dict['wait'].get(db, 0) + cnt
return info_dict
|
Returns the number of active lock discriminated by database.
@return: : Dictionary of stats.
|
entailment
|
def getBgWriterStats(self):
"""Returns Global Background Writer and Checkpoint Activity stats.
@return: Nested dictionary of stats.
"""
info_dict = {}
if self.checkVersion('8.3'):
cur = self._conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute("SELECT * FROM pg_stat_bgwriter")
info_dict = cur.fetchone()
return info_dict
|
Returns Global Background Writer and Checkpoint Activity stats.
@return: Nested dictionary of stats.
|
entailment
|
def getXlogStatus(self):
"""Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
"""
inRecovery = None
if self.checkVersion('9.0'):
inRecovery = self._simpleQuery("SELECT pg_is_in_recovery();")
cur = self._conn.cursor()
if inRecovery:
cols = ['pg_last_xlog_receive_location()',
'pg_last_xlog_replay_location()',]
headers = ['xlog_receive_location',
'xlog_replay_location',]
if self.checkVersion('9.1'):
cols.extend(['pg_last_xact_replay_timestamp()',
'pg_is_xlog_replay_paused()',])
headers.extend(['xact_replay_timestamp',
'xlog_replay_paused',])
cur.execute("""SELECT %s;""" % ','.join(cols))
else:
cur.execute("""SELECT
pg_current_xlog_location(),
pg_xlogfile_name(pg_current_xlog_location());""")
headers = ('xlog_location', 'xlog_filename')
row = cur.fetchone()
info_dict = dict(zip(headers, row))
if inRecovery is not None:
info_dict['in_recovery'] = inRecovery
return info_dict
|
Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
|
entailment
|
def getSlaveStatus(self):
"""Returns status of replication slaves.
@return: Dictionary of status items.
"""
info_dict = {}
if self.checkVersion('9.1'):
cols = ['procpid', 'usename', 'application_name',
'client_addr', 'client_port', 'backend_start', 'state',
'sent_location', 'write_location', 'flush_location',
'replay_location', 'sync_priority', 'sync_state',]
cur = self._conn.cursor()
cur.execute("""SELECT %s FROM pg_stat_replication;"""
% ','.join(cols))
rows = cur.fetchall()
for row in rows:
info_dict[row[0]] = dict(zip(cols[1:], row[1:]))
else:
return None
return info_dict
|
Returns status of replication slaves.
@return: Dictionary of status items.
|
entailment
|
def _connect(self):
"""Establish connection to MySQL Database."""
if self._connParams:
self._conn = MySQLdb.connect(**self._connParams)
else:
self._conn = MySQLdb.connect('')
|
Establish connection to MySQL Database.
|
entailment
|
def getStorageEngines(self):
"""Returns list of supported storage engines.
@return: List of storage engine names.
"""
cur = self._conn.cursor()
cur.execute("""SHOW STORAGE ENGINES;""")
rows = cur.fetchall()
if rows:
return [row[0].lower() for row in rows if row[1] in ['YES', 'DEFAULT']]
else:
return []
|
Returns list of supported storage engines.
@return: List of storage engine names.
|
entailment
|
def getParam(self, key):
"""Returns value of Run-time Database Parameter 'key'.
@param key: Run-time parameter name.
@return: Run-time parameter value.
"""
cur = self._conn.cursor()
cur.execute("SHOW GLOBAL VARIABLES LIKE %s", key)
row = cur.fetchone()
        return util.parse_value(row[1])
|
Returns value of Run-time Database Parameter 'key'.
@param key: Run-time parameter name.
@return: Run-time parameter value.
|
entailment
|
def getParams(self):
"""Returns dictionary of all run-time parameters.
@return: Dictionary of all Run-time parameters.
"""
cur = self._conn.cursor()
cur.execute("SHOW GLOBAL VARIABLES")
rows = cur.fetchall()
info_dict = {}
for row in rows:
key = row[0]
val = util.parse_value(row[1])
info_dict[key] = val
return info_dict
|
Returns dictionary of all run-time parameters.
@return: Dictionary of all Run-time parameters.
|
entailment
|
def getProcessStatus(self):
"""Returns number of processes discriminated by state.
@return: Dictionary mapping process state to number of processes.
"""
info_dict = {}
cur = self._conn.cursor()
cur.execute("""SHOW FULL PROCESSLIST;""")
rows = cur.fetchall()
if rows:
for row in rows:
if row[6] == '':
state = 'idle'
elif row[6] is None:
state = 'other'
else:
state = str(row[6]).replace(' ', '_').lower()
info_dict[state] = info_dict.get(state, 0) + 1
return info_dict
|
Returns number of processes discriminated by state.
@return: Dictionary mapping process state to number of processes.
|
entailment
|
def getProcessDatabase(self):
"""Returns number of processes discriminated by database name.
@return: Dictionary mapping database name to number of processes.
"""
info_dict = {}
cur = self._conn.cursor()
cur.execute("""SHOW FULL PROCESSLIST;""")
rows = cur.fetchall()
if rows:
for row in rows:
db = row[3]
info_dict[db] = info_dict.get(db, 0) + 1
return info_dict
|
Returns number of processes discriminated by database name.
@return: Dictionary mapping database name to number of processes.
|
entailment
|
def getDatabases(self):
"""Returns list of databases.
@return: List of databases.
"""
cur = self._conn.cursor()
cur.execute("""SHOW DATABASES;""")
rows = cur.fetchall()
if rows:
return [row[0] for row in rows]
else:
return []
|
Returns list of databases.
@return: List of databases.
|
entailment
|
def spc_helper(egg, match='exact', distance='euclidean',
features=None):
"""
Computes probability of a word being recalled (in the appropriate recall list), given its presentation position
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
prec : numpy array
each number represents the probability of recall for a word presented in given position/index
"""
def spc(lst):
d = np.zeros_like(egg.pres.values[0])
inds = np.array(lst[~np.isnan(lst)]).astype(int)
d[inds-1]=1
return d
opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if match in ['exact', 'best']:
result = [spc(lst) for lst in recmat]
elif match == 'smooth':
result = np.nanmean(recmat, 2)
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.mean(result, 0)
|
Computes probability of a word being recalled (in the appropriate recall list), given its presentation position
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
prec : numpy array
each number represents the probability of recall for a word presented in given position/index
|
entailment
|
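A worked sketch of spc() for a four-item list where positions 2 and 1 were recalled (NaN marks an unrecalled slot); averaging such indicator vectors across lists yields the serial position curve:

import numpy as np

lst = np.array([2.0, 1.0, np.nan])  # hypothetical recall row, 1-based positions
d = np.zeros(4)
inds = np.array(lst[~np.isnan(lst)]).astype(int)
d[inds - 1] = 1
print(d)  # [1. 1. 0. 0.]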
def _retrieve(self):
"""Query Apache Tomcat Server Status Page in XML format and return
the result as an ElementTree object.
@return: ElementTree object of Status Page XML.
"""
url = "%s://%s:%d/manager/status" % (self._proto, self._host, self._port)
params = {}
params['XML'] = 'true'
response = util.get_url(url, self._user, self._password, params)
tree = ElementTree.XML(response)
return tree
|
Query Apache Tomcat Server Status Page in XML format and return
the result as an ElementTree object.
@return: ElementTree object of Status Page XML.
|
entailment
|
def getMemoryStats(self):
"""Return JVM Memory Stats for Apache Tomcat Server.
@return: Dictionary of memory utilization stats.
"""
if self._statusxml is None:
self.initStats()
node = self._statusxml.find('jvm/memory')
memstats = {}
if node is not None:
for (key,val) in node.items():
memstats[key] = util.parse_value(val)
return memstats
|
Return JVM Memory Stats for Apache Tomcat Server.
@return: Dictionary of memory utilization stats.
|
entailment
|
def getConnectorStats(self):
"""Return dictionary of Connector Stats for Apache Tomcat Server.
@return: Nested dictionary of Connector Stats.
"""
if self._statusxml is None:
self.initStats()
connnodes = self._statusxml.findall('connector')
connstats = {}
if connnodes:
for connnode in connnodes:
namestr = connnode.get('name')
if namestr is not None:
mobj = re.match('(.*)-(\d+)', namestr)
if mobj:
proto = mobj.group(1)
port = int(mobj.group(2))
connstats[port] = {'proto': proto}
for tag in ('threadInfo', 'requestInfo'):
stats = {}
node = connnode.find(tag)
if node is not None:
for (key,val) in node.items():
if re.search('Time$', key):
stats[key] = float(val) / 1000.0
else:
stats[key] = util.parse_value(val)
if stats:
connstats[port][tag] = stats
return connstats
|
Return dictionary of Connector Stats for Apache Tomcat Server.
@return: Nested dictionary of Connector Stats.
|
entailment
|
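A usage sketch for the two Tomcat accessors above; the enclosing class name and constructor signature are assumptions, since only the methods appear in this excerpt:
# Hypothetical wrapper class; name and constructor signature are assumptions.
tomcat = TomcatInfo(host='localhost', port=8080, user='admin', password='secret')
mem = tomcat.getMemoryStats()        # e.g. {'free': ..., 'total': ..., 'max': ...}
conns = tomcat.getConnectorStats()   # e.g. {8080: {'proto': 'http', 'requestInfo': {...}}}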
def load(filepath, update=True):
"""
Loads eggs, fried eggs and example data
Parameters
----------
filepath : str
Location of file
update : bool
If true, updates egg to latest format
Returns
----------
data : quail.Egg or quail.FriedEgg
Data loaded from disk
"""
if filepath == 'automatic' or filepath == 'example':
fpath = os.path.dirname(os.path.abspath(__file__)) + '/data/automatic.egg'
return load_egg(fpath)
elif filepath == 'manual':
fpath = os.path.dirname(os.path.abspath(__file__)) + '/data/manual.egg'
return load_egg(fpath, update=False)
elif filepath == 'naturalistic':
fpath = os.path.dirname(os.path.abspath(__file__)) + '/data/naturalistic.egg'
return load_egg(fpath)
elif filepath.split('.')[-1]=='egg':
return load_egg(filepath, update=update)
elif filepath.split('.')[-1]=='fegg':
return load_fegg(filepath, update=False)
else:
raise ValueError('Could not load file.')
|
Loads eggs, fried eggs and example data
Parameters
----------
filepath : str
Location of file
update : bool
If true, updates egg to latest format
Returns
----------
data : quail.Egg or quail.FriedEgg
Data loaded from disk
|
entailment
|
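A brief sketch of the loader's dispatch, assuming quail is installed ('results.fegg' is a hypothetical path used only to show the extension-based branch):
import quail
egg = quail.load('example')          # bundled example egg (automatic dataset)
fried = quail.load('results.fegg')   # hypothetical saved FriedEgg; dispatches on extension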
def load_fegg(filepath, update=True):
"""
Loads pickled egg
Parameters
----------
filepath : str
Location of pickled egg
update : bool
If true, updates egg to latest format
Returns
----------
egg : Egg data object
A loaded unpickled egg
"""
try:
egg = FriedEgg(**dd.io.load(filepath))
except ValueError as e:
print(e)
# if error, try loading old format
with open(filepath, 'rb') as f:
egg = pickle.load(f)
if update:
return egg.crack()
else:
return egg
|
Loads pickled egg
Parameters
----------
filepath : str
Location of pickled egg
update : bool
If true, updates egg to latest format
Returns
----------
egg : Egg data object
A loaded unpickled egg
|
entailment
|
def load_egg(filepath, update=True):
"""
Loads pickled egg
Parameters
----------
filepath : str
Location of pickled egg
update : bool
If true, updates egg to latest format
Returns
----------
egg : Egg data object
A loaded unpickled egg
"""
try:
egg = Egg(**dd.io.load(filepath))
except:
# if error, try loading old format
with open(filepath, 'rb') as f:
egg = pickle.load(f)
if update:
if egg.meta:
old_meta = egg.meta
egg.crack()
egg.meta = old_meta
return egg
else:
return egg.crack()
else:
return egg
|
Loads pickled egg
Parameters
----------
filepath : str
Location of pickled egg
update : bool
If true, updates egg to latest format
Returns
----------
egg : Egg data object
A loaded unpickled egg
|
entailment
|
def loadEL(dbpath=None, recpath=None, remove_subs=None, wordpool=None, groupby=None, experiments=None,
filters=None):
'''
Function that loads sql files generated by autoFR Experiment
'''
assert (dbpath is not None), "You must specify a db file or files."
assert (recpath is not None), "You must specify a recall folder."
assert (wordpool is not None), "You must specify a wordpool file."
assert (experiments is not None), "You must specify a list of experiments"
############################################################################
# subfunctions #############################################################
def db2df(db, filter_func=None):
'''
Loads db file and converts to dataframe
'''
db_url = "sqlite:///" + db
table_name = 'turkdemo'
data_column_name = 'datastring'
# boilerplace sqlalchemy setup
engine = create_engine(db_url)
metadata = MetaData()
metadata.bind = engine
table = Table(table_name, metadata, autoload=True)
# make a query and loop through
s = table.select()
rows = s.execute()
data = []
for row in rows:
data.append(row[data_column_name])
# parse each participant's datastring as json object
# and take the 'data' sub-object
data = [json.loads(part)['data'] for part in data if part is not None]
# remove duplicate subject data for debugXG82XV:debug7XPXQA
# data[110] = data[110][348:]
# insert uniqueid field into trialdata in case it wasn't added
# in experiment:
for part in data:
for record in part:
# print(record)
if type(record['trialdata']) is list:
record['trialdata'] = {record['trialdata'][0]:record['trialdata'][1]}
record['trialdata']['uniqueid'] = record['uniqueid']
# flatten nested list so we just have a list of the trialdata recorded
# each time psiturk.recordTrialData(trialdata) was called.
def isNotNumber(s):
try:
float(s)
return False
except ValueError:
return True
data = [record['trialdata'] for part in data for record in part]
# filter out fields that we dont want using isNotNumber function
filtered_data = [{k:v for (k,v) in list(part.items()) if isNotNumber(k)} for part in data]
# Put all subjects' trial data into a dataframe object from the
# 'pandas' python library: one option among many for analysis
data_frame = pd.DataFrame(filtered_data)
data_column_name = 'codeversion'
# boilerplace sqlalchemy setup
engine = create_engine(db_url)
metadata = MetaData()
metadata.bind = engine
table = Table(table_name, metadata, autoload=True)
# make a query and loop through
s = table.select()
rows = s.execute()
versions = []
version_dict = {}
for row in rows:
version_dict[row[0]]=row[data_column_name]
version_col = []
for idx,sub in enumerate(data_frame['uniqueid'].unique()):
for i in range(sum(data_frame['uniqueid']==sub)):
version_col.append(version_dict[sub])
data_frame['exp_version']=version_col
if filter_func:
for idx,filt in enumerate(filter_func):
data_frame = filt(data_frame)
return data_frame
# custom filter to clean db file
def experimenter_filter(data_frame):
data=[]
indexes=[]
for line in data_frame.iterrows():
delete = False
try:
if json.loads(line[1]['responses'])['Q1'].lower() in ['kirsten','allison','allison\nallison','marisol', 'marisol ','marisiol', 'maddy','campbell', 'campbell field', 'kirsten\nkirsten', 'emily', 'bryan', 'armando', 'armando ortiz',
'maddy/lucy','paxton', 'lucy','campbell\ncampbell','madison','darya','rachael']:
delete = False
else:
delete = True
except:
pass
if delete:
indexes.append(line[0])
return data_frame.drop(indexes)
def adaptive_filter(data_frame):
data=[]
indexes=[]
subjcb={}
for line in data_frame.iterrows():
delete = False
try:
if 'Q2' in json.loads(line[1]['responses']):
delete = False
else:
delete = False
except:
pass
if delete:
indexes.append(line[0])
return data_frame.drop(indexes)
def experiments_filter(data_frame):
indexes=[]
for line in data_frame.iterrows():
delete = False
try:
if line[1]['exp_version'] in experiments:
delete = False
else:
delete = True
except:
pass
if delete:
indexes.append(line[0])
return data_frame.drop(indexes)
# this function takes the data frame and returns subject specific data based on the subid variable
def filterData(data_frame,subid):
filtered_stim_data = data_frame[data_frame['stimulus'].notnull() & data_frame['listNumber'].notnull()]
filtered_stim_data = filtered_stim_data[filtered_stim_data['trial_type']=='single-stim']
filtered_stim_data = filtered_stim_data[filtered_stim_data['uniqueid']==subid]
return filtered_stim_data
def createStimDict(data):
stimDict = []
for index, row in data.iterrows():
try:
stimDict.append({
'text': str(re.findall('>(.+)<',row['stimulus'])[0]),
'color' : { 'r' : int(re.findall('rgb\((.+)\)',row['stimulus'])[0].split(',')[0]),
'g' : int(re.findall('rgb\((.+)\)',row['stimulus'])[0].split(',')[1]),
'b' : int(re.findall('rgb\((.+)\)',row['stimulus'])[0].split(',')[2])
},
'location' : {
'top': float(re.findall('top:(.+)\%;', row['stimulus'])[0]),
'left' : float(re.findall('left:(.+)\%', row['stimulus'])[0])
},
'category' : wordpool['CATEGORY'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
'size' : wordpool['SIZE'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
'wordLength' : len(str(re.findall('>(.+)<',row['stimulus'])[0])),
'firstLetter' : str(re.findall('>(.+)<',row['stimulus'])[0])[0],
'listnum' : row['listNumber']
})
except:
stimDict.append({
'text': str(re.findall('>(.+)<',row['stimulus'])[0]),
'color' : { 'r' : 0,
'g' : 0,
'b' : 0
},
'location' : {
'top': 50,
'left' : 50
},
'category' : wordpool['CATEGORY'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
'size' : wordpool['SIZE'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<',row['stimulus'])[0]))],
'wordLength' : len(str(re.findall('>(.+)<',row['stimulus'])[0])),
'firstLetter' : str(re.findall('>(.+)<',row['stimulus'])[0])[0],
'listnum' : row['listNumber']
})
return stimDict
# this function loads in the recall data into an array of arrays, where each array represents a list of words
def loadRecallData(subid):
recalledWords = []
for i in range(0,16):
try:
f = open(recpath + subid + '/' + subid + '-' + str(i) + '.wav.txt', 'r')
spamreader = csv.reader(f, delimiter=',', quotechar='|')
except (IOError, OSError) as e:
try:
f = open(recpath + subid + '-' + str(i) + '.wav.txt', 'r')
spamreader = csv.reader(f, delimiter=',', quotechar='|')
except (IOError, OSError) as e:
print(e)
try:
words=[]
altformat=True
for row in spamreader:
if len(row)>1:
recalledWords.append(row)
altformat=False
break
else:
try:
words.append(row[0])
except:
pass
if altformat:
recalledWords.append(words)
except:
print('could not process '+ recpath + subid + '/' + subid + '-' + str(i) + '.wav.txt')
return recalledWords
# this function computes accuracy for a series of lists
def computeListAcc(stimDict,recalledWords):
accVec = []
for i in range(0,16):
stim = [stim['text'] for stim in stimDict if stim['listnum']==i]
recalled= recalledWords[i]
acc = 0
tmpstim = stim[:]
for word in recalled:
if word in tmpstim:
tmpstim.remove(word)
acc+=1
accVec.append(acc/len(stim))
return accVec
def getFeatures(stimDict):
stimDict_copy = stimDict[:]
for item in stimDict_copy:
item['location'] = [item['location']['top'], item['location']['left']]
item['color'] = [item['color']['r'], item['color']['g'], item['color']['b']]
item.pop('text', None)
item.pop('listnum', None)
stimDict_copy = [stimDict_copy[i:i+16] for i in range(0, len(stimDict_copy), 16)]
return stimDict_copy
############################################################################
# main program #############################################################
# if its not a list, make it one
if type(dbpath) is not list:
dbpath = [dbpath]
# read in stimulus library
wordpool = pd.read_csv(wordpool)
# add custom filters
if filters:
filter_func = [adaptive_filter, experimenter_filter, experiments_filter] + filters
else:
filter_func = [adaptive_filter, experimenter_filter, experiments_filter]
# load in dbs and convert to df, and filter
dfs = [db2df(db, filter_func=filter_func) for db in dbpath]
# concatenate the db files
df = pd.concat(dfs)
# subjects who have completed the exp
subids = list(df[df['listNumber']==15]['uniqueid'].unique())
# remove problematic subjects
if remove_subs:
for sub in remove_subs:
try:
subids.remove(sub)
except:
print('Could not find subject: ' + sub + ', skipping...')
# set up data structure to load in subjects
if groupby:
pres = [[] for i in range(len(groupby['exp_version']))]
rec = [[] for i in range(len(groupby['exp_version']))]
features = [[] for i in range(len(groupby['exp_version']))]
subs = [[] for i in range(len(groupby['exp_version']))]
# make each groupby item a list
groupby = [exp if type(exp) is list else [exp] for exp in groupby['exp_version']]
else:
pres = [[]]
rec = [[]]
features = [[]]
subs = [[]]
# for each subject that completed the experiment
for idx,sub in enumerate(subids):
# get the subjects data
filteredStimData = filterData(df,sub)
if filteredStimData['exp_version'].values[0] in experiments:
# create stim dict
stimDict = createStimDict(filteredStimData)
sub_data = pd.DataFrame(stimDict)
sub_data['subject']=idx
sub_data['experiment']=filteredStimData['exp_version'].values[0]
sub_data = sub_data[['experiment','subject','listnum','text','category','color','location','firstLetter','size','wordLength']]
# get features from stim dict
feats = getFeatures(stimDict)
# load in the recall data
recalledWords = loadRecallData(sub)
# get experiment version
exp_version = filteredStimData['exp_version'].values[0]
# find the idx of the experiment for this subjects
if groupby:
exp_idx = list(np.where([exp_version in item for item in groupby])[0])
else:
exp_idx = [0]
if exp_idx != []:
pres[exp_idx[0]].append([list(sub_data[sub_data['listnum']==lst]['text'].values) for lst in sub_data['listnum'].unique()])
rec[exp_idx[0]].append(recalledWords)
features[exp_idx[0]].append(feats)
subs[exp_idx[0]].append(sub)
eggs = [Egg(pres=ipres, rec=irec, features=ifeatures, meta={'ids' : isub}) for ipres,irec,ifeatures,isub in zip(pres, rec, features, subs)]
# map feature dictionaries in pres df to rec df
def checkword(x):
if x is None:
return x
else:
try:
return stim_dict[x['item']]
except:
return x
# convert utf-8 bytes type to string
def update_types(egg):
featlist = list(egg.pres.loc[0].loc[0].values.tolist()[0].keys())
def update1df(df):
for sub in range(egg.n_subjects):
for liszt in range(egg.n_lists):
for item in range(len(df.loc[sub].loc[liszt].values.tolist())):
for feat in featlist:
if feat in df.loc[sub].loc[liszt].values.tolist()[item].keys():
if isinstance(df.loc[sub].loc[liszt].values.tolist()[item][feat], np.bytes_):
try:
df.loc[sub].loc[liszt].values.tolist()[item][feat] = str(df.loc[sub].loc[liszt].values.tolist()[item][feat], 'utf-8')
except:
print("Subject " + str(sub) + ", list " + str(liszt) + ", item " + str(item) + ", feature " + str(feat) + ": Could not convert type " + str(type(egg.rec.loc[sub].loc[liszt].values.tolist()[item][feat])) + " to string.")
update1df(egg.pres)
update1df(egg.rec)
for egg in eggs:
update_types(egg)
old_meta = egg.meta
temp_eggs = [egg]
for i in range(egg.n_subjects):
e = egg.crack(subjects=[i])
stim = e.pres.values.ravel()
stim_dict = {str(x['item']) : {k:v for k, v in iter(x.items())} for x in stim}
e.rec = e.rec.applymap(lambda x: checkword(x))
temp_eggs.append(e)
edited_egg = stack_eggs(temp_eggs)
mapped_egg = edited_egg.crack(subjects=[i for i in range(egg.n_subjects,egg.n_subjects*2)])
mapped_egg.meta = old_meta
eggs[eggs.index(egg)] = mapped_egg
if len(eggs)>1:
return eggs
else:
return eggs[0]
|
Function that loads sql files generated by autoFR Experiment
|
entailment
|
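A hedged call sketch; the paths and experiment version strings below are placeholders, assuming psiTurk .db files and per-subject transcription .txt files laid out the way the loader expects:
eggs = loadEL(dbpath=['exp1.db'],            # hypothetical psiTurk database file
              recpath='transcriptions/',     # hypothetical recall folder
              wordpool='cut_wordpool.csv',   # hypothetical wordpool CSV
              experiments=['1.0'])           # experiment code versions to keep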
def load_example_data(dataset='automatic'):
"""
Loads example data
The automatic and manual example data are eggs containing 30 subjects who completed a free
recall experiment as described here: https://psyarxiv.com/psh48/. The subjects
studied 8 lists of 16 words each and then performed a free recall test.
The naturalistic example data is an egg containing 17 subjects who viewed and verbally
recounted an episode of the BBC series Sherlock, as described here:
https://www.nature.com/articles/nn.4450. We fit a topic model to hand-annotated
text-descriptions of scenes from the video and used the model to transform both the
scene descriptions and manual transcriptions of each subject's verbal recall. We then
used a Hidden Markov Model to segment the video model and the recall models, by subject,
into k events.
Parameters
----------
dataset : str
The dataset to load. Can be 'automatic', 'manual', or 'naturalistic'. The free recall
audio recordings for the 'automatic' dataset were transcribed by Google
Cloud Speech and the 'manual' dataset was transcribed by humans. The 'naturalistic'
dataset was transcribed by humans and transformed as described above.
Returns
----------
data : quail.Egg
Example data
"""
# can only be auto or manual
assert dataset in ['automatic', 'manual', 'naturalistic'], "Dataset can only be automatic, manual, or naturalistic"
if dataset == 'naturalistic':
# open naturalistic egg
egg = Egg(**dd.io.load(os.path.dirname(os.path.abspath(__file__)) + '/data/' + dataset + '.egg'))
else:
# open pickled egg
try:
with open(os.path.dirname(os.path.abspath(__file__)) + '/data/' + dataset + '.egg', 'rb') as handle:
egg = pickle.load(handle)
except:
f = dd.io.load(os.path.dirname(os.path.abspath(__file__)) + '/data/' + dataset + '.egg')
egg = Egg(pres=f['pres'], rec=f['rec'], dist_funcs=f['dist_funcs'],
subjgroup=f['subjgroup'], subjname=f['subjname'],
listgroup=f['listgroup'], listname=f['listname'],
date_created=f['date_created'])
return egg.crack()
|
Loads example data
The automatic and manual example data are eggs containing 30 subjects who completed a free
recall experiment as described here: https://psyarxiv.com/psh48/. The subjects
studied 8 lists of 16 words each and then performed a free recall test.
The naturalistic example data is an egg containing 17 subjects who viewed and verbally
recounted an episode of the BBC series Sherlock, as described here:
https://www.nature.com/articles/nn.4450. We fit a topic model to hand-annotated
text-descriptions of scenes from the video and used the model to transform both the
scene descriptions and manual transcriptions of each subject's verbal recall. We then
used a Hidden Markov Model to segment the video model and the recall models, by subject,
into k events.
Parameters
----------
dataset : str
The dataset to load. Can be 'automatic', 'manual', or 'naturalistic'. The free recall
audio recordings for the 'automatic' dataset were transcribed by Google
Cloud Speech and the 'manual' dataset was transcribed by humans. The 'naturalistic'
dataset was transcribed by humans and transformed as described above.
Returns
----------
data : quail.Egg
Example data
|
entailment
|
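For instance (assuming quail is installed):
import quail
egg = quail.load_example_data(dataset='automatic')
print(egg.n_subjects)    # 30 subjects, 8 lists of 16 words each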
def retrieveVals(self):
"""Retrieve values for graphs."""
fs = FSinfo(self._fshost, self._fsport, self._fspass)
if self.hasGraph('fs_calls'):
count = fs.getCallCount()
self.setGraphVal('fs_calls', 'calls', count)
if self.hasGraph('fs_channels'):
count = fs.getChannelCount()
self.setGraphVal('fs_channels', 'channels', count)
|
Retrieve values for graphs.
|
entailment
|
def autoconf(self):
"""Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise.
"""
fs = FSinfo(self._fshost, self._fsport, self._fspass)
return fs is not None
|
Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise.
|
entailment
|
def spsolve(A, b):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[0]
If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
"""
x = UmfpackLU(A).solve(b)
if b.ndim == 2 and b.shape[1] == 1:
# compatibility with scipy.sparse.spsolve quirk
return x.ravel()
else:
return x
|
Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[0]
If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
|
entailment
|
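A minimal sketch, assuming the spsolve above is importable from this module (it is UMFPACK-backed but, as the docstring notes, mirrors the scipy.sparse.linalg.spsolve interface):
import numpy as np
from scipy.sparse import csc_matrix
A = csc_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
b = np.array([1.0, 2.0])
x = spsolve(A, b)                # solves A x = b
assert np.allclose(A.dot(x), b)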
def solve(self, b):
"""
Solve linear equation A x = b for x
Parameters
----------
b : ndarray
Right-hand side of the matrix equation. Can be vector or a matrix.
Returns
-------
x : ndarray
Solution to the matrix equation
"""
if isspmatrix(b):
b = b.toarray()
if b.shape[0] != self._A.shape[1]:
raise ValueError("Shape of b is not compatible with that of A")
b_arr = asarray(b, dtype=self._A.dtype).reshape(b.shape[0], -1)
x = np.zeros((self._A.shape[0], b_arr.shape[1]), dtype=self._A.dtype)
for j in range(b_arr.shape[1]):
x[:,j] = self.umf.solve(UMFPACK_A, self._A, b_arr[:,j], autoTranspose=True)
return x.reshape((self._A.shape[0],) + b.shape[1:])
|
Solve linear equation A x = b for x
Parameters
----------
b : ndarray
Right-hand side of the matrix equation. Can be vector or a matrix.
Returns
-------
x : ndarray
Solution to the matrix equation
|
entailment
|
def solve_sparse(self, B):
"""
Solve linear equation of the form A X = B. Where B and X are sparse matrices.
Parameters
----------
B : any scipy.sparse matrix
Right-hand side of the matrix equation.
Note: it will be converted to csc_matrix via `.tocsc()`.
Returns
-------
X : csc_matrix
Solution to the matrix equation as a csc_matrix
"""
B = B.tocsc()
cols = list()
for j in range(B.shape[1]):
col = self.solve(B[:,j])
cols.append(csc_matrix(col))
return hstack(cols)
|
Solve linear equation of the form A X = B. Where B and X are sparse matrices.
Parameters
----------
B : any scipy.sparse matrix
Right-hand side of the matrix equation.
Note: it will be converted to csc_matrix via `.tocsc()`.
Returns
-------
X : csc_matrix
Solution to the matrix equation as a csc_matrix
|
entailment
|
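Factorizing once and reusing the decomposition for many right-hand sides is the point of the class; solving against a sparse identity recovers the inverse column by column (a sketch, assuming UmfpackLU is importable from this module):
from scipy.sparse import csc_matrix, identity
lu = UmfpackLU(csc_matrix([[4.0, 1.0], [1.0, 3.0]]))   # factorize once
Ainv = lu.solve_sparse(identity(2, format='csc'))      # csc_matrix holding A^-1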
def recall_matrix(egg, match='exact', distance='euclidean', features=None):
"""
Computes recall matrix given list of presented and list of recalled words
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by scipy.spatial.distance.cdist.
Returns
----------
recall_matrix : list of lists of ints
each integer represents the presentation position of the recalled word in a given list in order of recall
0s represent recalled words not presented
negative ints represent words recalled from previous lists
"""
if match in ['best', 'smooth']:
if not features:
features = [k for k,v in egg.pres.loc[0][0].values[0].items() if k!='item']
if not features:
raise ValueError('No features found. Cannot match with best or smooth strategy')
if not isinstance(features, list):
features = [features]
if match=='exact':
features=['item']
return _recmat_exact(egg.pres, egg.rec, features)
else:
return _recmat_smooth(egg.pres, egg.rec, features, distance, match)
|
Computes recall matrix given list of presented and list of recalled words
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by numpy.spatial.distance.cdist.
Returns
----------
recall_matrix : list of lists of ints
each integer represents the presentation position of the recalled word in a given list in order of recall
0s represent recalled words not presented
negative ints represent words recalled from previous lists
|
entailment
|
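A usage sketch, assuming quail is installed and recall_matrix is importable from this module:
import quail
egg = quail.load('example')
recmat = recall_matrix(egg, match='exact')
# each row holds the presentation positions of recalled words, in order of recall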
def retrieveVals(self):
"""Retrieve values for graphs."""
net_info = NetstatInfo()
if self.hasGraph('netstat_conn_status'):
stats = net_info.getTCPportConnStatus(include_listen=True)
for fname in ('listen', 'established', 'syn_sent', 'syn_recv',
'fin_wait1', 'fin_wait2', 'time_wait',
'close','close_wait', 'last_ack', 'closing',
'unknown',):
self.setGraphVal('netstat_conn_status', fname,
stats.get(fname,0))
if self.hasGraph('netstat_conn_server'):
stats = net_info.getTCPportConnCount(localport=self._port_list)
for srv in self._srv_list:
numconn = 0
for port in self._srv_dict[srv]:
numconn += stats.get(port, 0)
self.setGraphVal('netstat_conn_server', srv, numconn)
|
Retrieve values for graphs.
|
entailment
|
def _parseFreePBXconf(self):
"""Parses FreePBX configuration file /etc/amportal for user and password
for Asterisk Manager Interface.
@return: True if configuration file is found and parsed successfully.
"""
amiuser = None
amipass = None
if os.path.isfile(confFileFreePBX):
try:
fp = open(confFileFreePBX, 'r')
data = fp.read()
fp.close()
except:
raise IOError('Failed reading FreePBX configuration file: %s'
% confFileFreePBX)
for (key, val) in re.findall('^(AMPMGR\w+)\s*=\s*(\S+)\s*$',
data, re.MULTILINE):
if key == 'AMPMGRUSER':
amiuser = val
elif key == 'AMPMGRPASS':
amipass = val
if amiuser and amipass:
self._amiuser = amiuser
self._amipass = amipass
return True
return False
|
Parses FreePBX configuration file /etc/amportal for user and password
for Asterisk Manager Interface.
@return: True if configuration file is found and parsed successfully.
|
entailment
|
def _parseAsteriskConf(self):
"""Parses Asterisk configuration file /etc/asterisk/manager.conf for
user and password for Manager Interface. Returns True on success.
@return: True if configuration file is found and parsed successfully.
"""
if os.path.isfile(confFileAMI):
try:
fp = open(confFileAMI, 'r')
data = fp.read()
fp.close()
except:
raise IOError('Failed reading Asterisk configuration file: %s'
% confFileAMI)
mobj = re.search('^\[(\w+)\]\s*\r{0,1}\nsecret\s*=\s*(\S+)\s*$',
data, re.MULTILINE)
if mobj:
self._amiuser = mobj.group(1)
self._amipass = mobj.group(2)
return True
return False
|
Parses Asterisk configuration file /etc/asterisk/manager.conf for
user and password for Manager Interface. Returns True on success.
@return: True if configuration file is found and parsed successfully.
|
entailment
|
def _connect(self):
"""Connect to Asterisk Manager Interface."""
try:
if sys.version_info[:2] >= (2,6):
self._conn = telnetlib.Telnet(self._amihost, self._amiport,
connTimeout)
else:
self._conn = telnetlib.Telnet(self._amihost, self._amiport)
except:
raise Exception(
"Connection to Asterisk Manager Interface on "
"host %s and port %s failed."
% (self._amihost, self._amiport)
)
|
Connect to Asterisk Manager Interface.
|
entailment
|
def _sendAction(self, action, attrs=None, chan_vars=None):
"""Send action to Asterisk Manager Interface.
@param action: Action name
@param attrs: Tuple of key-value pairs for action attributes.
@param chan_vars: Tuple of key-value pairs for channel variables.
"""
self._conn.write("Action: %s\r\n" % action)
if attrs:
for (key,val) in attrs:
self._conn.write("%s: %s\r\n" % (key, val))
if chan_vars:
for (key,val) in chan_vars:
self._conn.write("Variable: %s=%s\r\n" % (key, val))
self._conn.write("\r\n")
|
Send action to Asterisk Manager Interface.
@param action: Action name
@param attrs: Tuple of key-value pairs for action attributes.
@param chan_vars: Tuple of key-value pairs for channel variables.
|
entailment
|
def _getResponse(self):
"""Read and parse response from Asterisk Manager Interface.
@return: Dictionary with response key-value pairs.
"""
resp_dict= dict()
resp_str = self._conn.read_until("\r\n\r\n", connTimeout)
for line in resp_str.split("\r\n"):
mobj = re.match('(\w+):\s*(\S.*)$', line)
if mobj:
resp_dict[mobj.group(1)] = mobj.group(2)
else:
mobj = re.match('(.*)--END COMMAND--\s*$', line, flags=re.DOTALL)
if mobj:
resp_dict['command_response'] = mobj.group(1)
return resp_dict
|
Read and parse response from Asterisk Manager Interface.
@return: Dictionary with response key-value pairs.
|
entailment
|
def _getGreeting(self):
"""Read and parse Asterisk Manager Interface Greeting to determine and
set Manager Interface version.
"""
greeting = self._conn.read_until("\r\n", connTimeout)
mobj = re.match('Asterisk Call Manager\/([\d\.]+)\s*$', greeting)
if mobj:
self._ami_version = util.SoftwareVersion(mobj.group(1))
else:
raise Exception("Asterisk Manager Interface version cannot be determined.")
|
Read and parse Asterisk Manager Interface Greeting to determine and
set Manager Interface version.
|
entailment
|
def _initAsteriskVersion(self):
"""Query Asterisk Manager Interface for Asterisk Version to configure
system for compatibility with multiple versions.
CLI Command - core show version
"""
if self._ami_version > util.SoftwareVersion('1.0'):
cmd = "core show version"
else:
cmd = "show version"
cmdresp = self.executeCommand(cmd)
mobj = re.match('Asterisk\s*(SVN-branch-|\s)(\d+(\.\d+)*)', cmdresp)
if mobj:
self._asterisk_version = util.SoftwareVersion(mobj.group(2))
else:
raise Exception('Asterisk version cannot be determined.')
|
Query Asterisk Manager Interface for Asterisk Version to configure
system for compatibility with multiple versions.
CLI Command - core show version
|
entailment
|
def _login(self):
"""Login to Asterisk Manager Interface."""
self._sendAction("login", (
("Username", self._amiuser),
("Secret", self._amipass),
("Events", "off"),
))
resp = self._getResponse()
if resp.get("Response") == "Success":
return True
else:
raise Exception("Authentication to Asterisk Manager Interface Failed.")
|
Login to Asterisk Manager Interface.
|
entailment
|
def executeCommand(self, command):
"""Send Action to Asterisk Manager Interface to execute CLI Command.
@param command: CLI command to execute.
@return: Command response string.
"""
self._sendAction("Command", (
("Command", command),
))
resp = self._getResponse()
result = resp.get("Response")
if result == "Follows":
return resp.get("command_response")
elif result == "Error":
raise Exception("Execution of Asterisk Manager Interface Command "
"(%s) failed with error message: %s" %
(command, str(resp.get("Message"))))
else:
raise Exception("Execution of Asterisk Manager Interface Command "
"failed: %s" % command)
|
Send Action to Asterisk Manager Interface to execute CLI Command.
@param command: CLI command to execute.
@return: Command response string.
|
entailment
|
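Since the wrapper class that owns these AMI methods is not shown in this excerpt, the following usage sketch assumes a hypothetical AsteriskInfo class that performs _connect, _getGreeting, and _login on construction:
# Hypothetical wrapper; the class name and constructor signature are assumptions.
ami = AsteriskInfo(host='localhost', port=5038, user='monitor', password='secret')
print(ami.executeCommand('core show uptime'))   # raw CLI output string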
def _initModuleList(self):
"""Query Asterisk Manager Interface to initialize internal list of
loaded modules.
CLI Command - core show modules
"""
if self.checkVersion('1.4'):
cmd = "module show"
else:
cmd = "show modules"
cmdresp = self.executeCommand(cmd)
self._modules = set()
for line in cmdresp.splitlines()[1:-1]:
mobj = re.match('\s*(\S+)\s', line)
if mobj:
self._modules.add(mobj.group(1).lower())
|
Query Asterisk Manager Interface to initialize internal list of
loaded modules.
CLI Command - core show modules
|
entailment
|
def _initApplicationList(self):
"""Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
"""
if self.checkVersion('1.4'):
cmd = "core show applications"
else:
cmd = "show applications"
cmdresp = self.executeCommand(cmd)
self._applications = set()
for line in cmdresp.splitlines()[1:-1]:
mobj = re.match('\s*(\S+):', line)
if mobj:
self._applications.add(mobj.group(1).lower())
|
Query Asterisk Manager Interface to initialize internal list of
available applications.
CLI Command - core show applications
|
entailment
|
def _initChannelTypesList(self):
"""Query Asterisk Manager Interface to initialize internal list of
supported channel types.
CLI Command - core show channeltypes
"""
if self.checkVersion('1.4'):
cmd = "core show channeltypes"
else:
cmd = "show channeltypes"
cmdresp = self.executeCommand(cmd)
self._chantypes = set()
for line in cmdresp.splitlines()[2:]:
mobj = re.match('\s*(\S+)\s+.*\s+(yes|no)\s+', line)
if mobj:
self._chantypes.add(mobj.group(1).lower())
|
Query Asterisk Manager Interface to initialize internal list of
supported channel types.
CLI Command - core show channeltypes
|
entailment
|
def hasModule(self, mod):
"""Returns True if mod is among the loaded modules.
@param mod: Module name.
@return: Boolean
"""
if self._modules is None:
self._initModuleList()
return mod in self._modules
|
Returns True if mod is among the loaded modules.
@param mod: Module name.
@return: Boolean
|
entailment
|
def hasApplication(self, app):
"""Returns True if app is among the loaded modules.
@param app: Module name.
@return: Boolean
"""
if self._applications is None:
self._initApplicationList()
return app in self._applications
|
Returns True if app is among the available applications.
@param app: Application name.
@return: Boolean
|
entailment
|
def hasChannelType(self, chan):
"""Returns True if chan is among the supported channel types.
@param chan: Channel type name.
@return: Boolean
"""
if self._chantypes is None:
self._initChannelTypesList()
return chan in self._chantypes
|
Returns True if chan is among the supported channel types.
@param chan: Channel type name.
@return: Boolean
|
entailment
|
def getCodecList(self):
"""Query Asterisk Manager Interface for defined codecs.
CLI Command - core show codecs
@return: Dictionary - Short Name -> (Type, Long Name)
"""
if self.checkVersion('1.4'):
cmd = "core show codecs"
else:
cmd = "show codecs"
cmdresp = self.executeCommand(cmd)
info_dict = {}
for line in cmdresp.splitlines():
mobj = re.match('\s*(\d+)\s+\((.+)\)\s+\((.+)\)\s+(\w+)\s+(\w+)\s+\((.+)\)$',
line)
if mobj:
info_dict[mobj.group(5)] = (mobj.group(4), mobj.group(6))
return info_dict
|
Query Asterisk Manager Interface for defined codecs.
CLI Command - core show codecs
@return: Dictionary - Short Name -> (Type, Long Name)
|
entailment
|
def getChannelStats(self, chantypes=('dahdi', 'zap', 'sip', 'iax2', 'local')):
"""Query Asterisk Manager Interface for Channel Stats.
CLI Command - core show channels
@return: Dictionary of statistics counters for channels.
Number of active channels for each channel type.
"""
if self.checkVersion('1.4'):
cmd = "core show channels"
else:
cmd = "show channels"
cmdresp = self.executeCommand(cmd)
info_dict ={}
for chanstr in chantypes:
chan = chanstr.lower()
if chan in ('zap', 'dahdi'):
info_dict['dahdi'] = 0
info_dict['mix'] = 0
else:
info_dict[chan] = 0
for k in ('active_calls', 'active_channels', 'calls_processed'):
info_dict[k] = 0
regexstr = ('(%s)\/(\w+)' % '|'.join(chantypes))
for line in cmdresp.splitlines():
mobj = re.match(regexstr,
line, re.IGNORECASE)
if mobj:
chan_type = mobj.group(1).lower()
chan_id = mobj.group(2).lower()
if chan_type == 'dahdi' or chan_type == 'zap':
if chan_id == 'pseudo':
info_dict['mix'] += 1
else:
info_dict['dahdi'] += 1
else:
info_dict[chan_type] += 1
continue
mobj = re.match('(\d+)\s+(active channel|active call|calls processed)',
line, re.IGNORECASE)
if mobj:
if mobj.group(2) == 'active channel':
info_dict['active_channels'] = int(mobj.group(1))
elif mobj.group(2) == 'active call':
info_dict['active_calls'] = int(mobj.group(1))
elif mobj.group(2) == 'calls processed':
info_dict['calls_processed'] = int(mobj.group(1))
continue
return info_dict
|
Query Asterisk Manager Interface for Channel Stats.
CLI Command - core show channels
@return: Dictionary of statistics counters for channels.
Number of active channels for each channel type.
|
entailment
|
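Continuing the hypothetical ami instance from the sketch above, the returned dictionary holds per-type channel counts plus the summary counters parsed from 'core show channels':
chans = ami.getChannelStats()
print(chans['sip'], chans['dahdi'], chans['active_calls'], chans['calls_processed'])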
def getPeerStats(self, chantype):
"""Query Asterisk Manager Interface for SIP / IAX2 Peer Stats.
CLI Command - sip show peers
iax2 show peers
@param chantype: Must be 'sip' or 'iax2'.
@return: Dictionary of statistics counters for VoIP Peers.
"""
chan = chantype.lower()
if not self.hasChannelType(chan):
return None
if chan == 'iax2':
cmd = "iax2 show peers"
elif chan == 'sip':
cmd = "sip show peers"
else:
raise AttributeError("Invalid channel type in query for Peer Stats.")
cmdresp = self.executeCommand(cmd)
info_dict = dict(
online = 0, unreachable = 0, lagged = 0,
unknown = 0, unmonitored = 0)
for line in cmdresp.splitlines():
if re.search('ok\s+\(\d+\s+ms\)\s*$', line, re.IGNORECASE):
info_dict['online'] += 1
else:
mobj = re.search('(unreachable|lagged|unknown|unmonitored)\s*$',
line, re.IGNORECASE)
if mobj:
info_dict[mobj.group(1).lower()] += 1
return info_dict
|
Query Asterisk Manager Interface for SIP / IAX2 Peer Stats.
CLI Command - sip show peers
iax2 show peers
@param chantype: Must be 'sip' or 'iax2'.
@return: Dictionary of statistics counters for VoIP Peers.
|
entailment
|
def getVoIPchanStats(self, chantype,
codec_list=('ulaw', 'alaw', 'gsm', 'g729')):
"""Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
CLI Commands - sip show channels
iax2 show channels
@param chantype: Must be 'sip' or 'iax2'.
@param codec_list: List of codec names to parse.
(Codecs not in the list are summed up to the other
count.)
@return: Dictionary of statistics counters for Active VoIP
Channels.
"""
chan = chantype.lower()
if not self.hasChannelType(chan):
return None
if chan == 'iax2':
cmd = "iax2 show channels"
elif chan == 'sip':
cmd = "sip show channels"
else:
raise AttributeError("Invalid channel type in query for Channel Stats.")
cmdresp = self.executeCommand(cmd)
lines = cmdresp.splitlines()
headers = re.split('\s\s+', lines[0])
try:
idx = headers.index('Format')
except ValueError:
try:
idx = headers.index('Form')
except:
raise Exception("Error in parsing header line of %s channel stats."
% chan)
codec_list = tuple(codec_list) + ('other', 'none')
info_dict = dict([(k,0) for k in codec_list])
for line in lines[1:-1]:
codec = None
cols = re.split('\s\s+', line)
colcodec = cols[idx]
mobj = re.match('0x\w+\s\((\w+)\)$', colcodec)
if mobj:
codec = mobj.group(1).lower()
elif re.match('\w+$', colcodec):
codec = colcodec.lower()
if codec:
if codec in info_dict:
info_dict[codec] += 1
elif codec == 'nothing' or codec[0:4] == 'unkn':
info_dict['none'] += 1
else:
info_dict['other'] += 1
return info_dict
|
Query Asterisk Manager Interface for SIP / IAX2 Channel / Codec Stats.
CLI Commands - sip show channels
iax2 show channels
@param chantype: Must be 'sip' or 'iax2'.
@param codec_list: List of codec names to parse.
(Codecs not in the list are summed up to the other
count.)
@return: Dictionary of statistics counters for Active VoIP
Channels.
|
entailment
|
def getConferenceStats(self):
"""Query Asterisk Manager Interface for Conference Room Stats.
CLI Command - meetme list
@return: Dictionary of statistics counters for Conference Rooms.
"""
if not self.hasConference():
return None
if self.checkVersion('1.6'):
cmd = "meetme list"
else:
cmd = "meetme"
cmdresp = self.executeCommand(cmd)
info_dict = dict(active_conferences = 0, conference_users = 0)
for line in cmdresp.splitlines():
mobj = re.match('\w+\s+0(\d+)\s', line)
if mobj:
info_dict['active_conferences'] += 1
info_dict['conference_users'] += int(mobj.group(1))
return info_dict
|
Query Asterisk Manager Interface for Conference Room Stats.
CLI Command - meetme list
@return: Dictionary of statistics counters for Conference Rooms.
|
entailment
|
def getVoicemailStats(self):
"""Query Asterisk Manager Interface for Voicemail Stats.
CLI Command - voicemail show users
@return: Dictionary of statistics counters for Voicemail Accounts.
"""
if not self.hasVoicemail():
return None
if self.checkVersion('1.4'):
cmd = "voicemail show users"
else:
cmd = "show voicemail users"
cmdresp = self.executeCommand(cmd)
info_dict = dict(accounts = 0, avg_messages = 0, max_messages = 0,
total_messages = 0)
for line in cmdresp.splitlines():
mobj = re.match('\w+\s+\w+\s+.*\s+(\d+)\s*$', line)
if mobj:
msgs = int(mobj.group(1))
info_dict['accounts'] += 1
info_dict['total_messages'] += msgs
if msgs > info_dict['max_messages']:
info_dict['max_messages'] = msgs
if info_dict['accounts'] > 0:
info_dict['avg_messages'] = (float(info_dict['total_messages'])
/ info_dict['accounts'])
return info_dict
|
Query Asterisk Manager Interface for Voicemail Stats.
CLI Command - voicemail show users
@return: Dictionary of statistics counters for Voicemail Accounts.
|
entailment
|
def getTrunkStats(self, trunkList):
"""Query Asterisk Manager Interface for Trunk Stats.
CLI Command - core show channels
@param trunkList: List of tuples of one of the two following types:
(Trunk Name, Regular Expression)
(Trunk Name, Regular Expression, MIN, MAX)
@return: Dictionary of trunk utilization statistics.
"""
re_list = []
info_dict = {}
for filt in trunkList:
info_dict[filt[0]] = 0
re_list.append(re.compile(filt[1], re.IGNORECASE))
if self.checkVersion('1.4'):
cmd = "core show channels"
else:
cmd = "show channels"
cmdresp = self.executeCommand(cmd)
for line in cmdresp.splitlines():
for idx in range(len(re_list)):
recomp = re_list[idx]
trunkid = trunkList[idx][0]
mobj = recomp.match(line)
if mobj:
if len(trunkList[idx]) == 2:
info_dict[trunkid] += 1
continue
elif len(trunkList[idx]) == 4:
num = mobj.groupdict().get('num')
if num is not None:
(vmin,vmax) = trunkList[idx][2:4]
if int(num) >= int(vmin) and int(num) <= int(vmax):
info_dict[trunkid] += 1
continue
return info_dict
|
Query Asterisk Manager Interface for Trunk Stats.
CLI Command - core show channels
@param trunkList: List of tuples of one of the two following types:
(Trunk Name, Regular Expression)
(Trunk Name, Regular Expression, MIN, MAX)
@return: Dictionary of trunk utilization statistics.
|
entailment
|
def getQueueStats(self):
"""Query Asterisk Manager Interface for Queue Stats.
CLI Command: queue show
@return: Dictionary of queue stats.
"""
if not self.hasQueue():
return None
info_dict = {}
if self.checkVersion('1.4'):
cmd = "queue show"
else:
cmd = "show queues"
cmdresp = self.executeCommand(cmd)
queue = None
ctxt = None
member_states = ("unknown", "not in use", "in use", "busy", "invalid",
"unavailable", "ringing", "ring+inuse", "on hold",
"total")
member_state_dict = dict([(k.lower().replace(' ', '_'),0)
for k in member_states])
for line in cmdresp.splitlines():
mobj = re.match(r"([\w\-]+)\s+has\s+(\d+)\s+calls\s+"
r"\(max (\d+|unlimited)\)\s+in\s+'(\w+)'\s+strategy\s+"
r"\((.+)\),\s+W:(\d+),\s+C:(\d+),\s+A:(\d+),\s+"
r"SL:([\d\.]+)%\s+within\s+(\d+)s", line)
if mobj:
ctxt = None
queue = mobj.group(1)
info_dict[queue] = {}
info_dict[queue]['queue_len'] = int(mobj.group(2))
try:
info_dict[queue]['queue_maxlen'] = int(mobj.group(3))
except ValueError:
info_dict[queue]['queue_maxlen'] = None
info_dict[queue]['strategy'] = mobj.group(4)
for tkn in mobj.group(5).split(','):
mobjx = re.match(r"\s*(\d+)s\s+(\w+)\s*", tkn)
if mobjx:
info_dict[queue]['avg_' + mobjx.group(2)] = int(mobjx.group(1))
info_dict[queue]['queue_weight'] = int(mobj.group(6))
info_dict[queue]['calls_completed'] = int(mobj.group(7))
info_dict[queue]['calls_abandoned'] = int(mobj.group(8))
info_dict[queue]['sla_pcent'] = float(mobj.group(9))
info_dict[queue]['sla_cutoff'] = int(mobj.group(10))
info_dict[queue]['members'] = member_state_dict.copy()
continue
mobj = re.match('\s+(Members|Callers):\s*$', line)
if mobj:
ctxt = mobj.group(1).lower()
continue
if ctxt == 'members':
mobj = re.match(r"\s+\S.*\s\((.*)\)\s+has\s+taken.*calls", line)
if mobj:
info_dict[queue]['members']['total'] += 1
state = mobj.group(1).lower().replace(' ', '_')
if state in info_dict[queue]['members']:
info_dict[queue]['members'][state] += 1
else:
raise AttributeError("Undefined queue member state %s"
% state)
continue
return info_dict
|
Query Asterisk Manager Interface for Queue Stats.
CLI Command: queue show
@return: Dictionary of queue stats.
|
entailment
|
def getFaxStatsCounters(self):
"""Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show stats
@return: Dictionary of fax stats.
"""
if not self.hasFax():
return None
info_dict = {}
cmdresp = self.executeCommand('fax show stats')
ctxt = 'general'
for section in cmdresp.strip().split('\n\n')[1:]:
i = 0
for line in section.splitlines():
mobj = re.match('(\S.*\S)\s*:\s*(\d+)\s*$', line)
if mobj:
if ctxt not in info_dict:
info_dict[ctxt] = {}
info_dict[ctxt][mobj.group(1).lower()] = int(mobj.group(2).lower())
elif i == 0:
ctxt = line.strip().lower()
i += 1
return info_dict
|
Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show stats
@return: Dictionary of fax stats.
|
entailment
|
def getFaxStatsSessions(self):
"""Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show sessions
@return: Dictionary of fax stats.
"""
if not self.hasFax():
return None
info_dict = {}
info_dict['total'] = 0
fax_types = ('g.711', 't.38')
fax_operations = ('send', 'recv')
fax_states = ('uninitialized', 'initialized', 'open',
'active', 'inactive', 'complete', 'unknown',)
info_dict['type'] = dict([(k,0) for k in fax_types])
info_dict['operation'] = dict([(k,0) for k in fax_operations])
info_dict['state'] = dict([(k,0) for k in fax_states])
cmdresp = self.executeCommand('fax show sessions')
sections = cmdresp.strip().split('\n\n')
if len(sections) >= 3:
for line in sections[1].splitlines()[1:]:
cols = re.split('\s\s+', line)
if len(cols) == 7:
info_dict['total'] += 1
if cols[3].lower() in fax_types:
info_dict['type'][cols[3].lower()] += 1
if cols[4] == 'receive':
info_dict['operation']['recv'] += 1
elif cols[4] == 'send':
info_dict['operation']['send'] += 1
if cols[5].lower() in fax_states:
info_dict['state'][cols[5].lower()] += 1
return info_dict
|
Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show sessions
@return: Dictionary of fax stats.
|
entailment
|
def retrieveVals(self):
"""Retrieve values for graphs."""
for iface in self._ifaceList:
if self._reqIfaceList is None or iface in self._reqIfaceList:
if (self.graphEnabled('wanpipe_traffic')
or self.graphEnabled('wanpipe_errors')):
stats = self._ifaceStats.get(iface)
if stats:
graph_name = 'wanpipe_traffic_%s' % iface
if self.hasGraph(graph_name):
for field in ('rxpackets', 'txpackets'):
self.setGraphVal(graph_name, field,
stats.get(field))
graph_name = 'wanpipe_errors_%s' % iface
if self.hasGraph(graph_name):
for field in ('rxerrs', 'txerrs', 'rxframe', 'txcarrier',
'rxdrop', 'txdrop', 'rxfifo', 'txfifo'):
self.setGraphVal(graph_name, field,
stats.get(field))
if (self.graphEnabled('wanpipe_pri_errors')
or self.graphEnabled('wanpipe_rxlevel')):
try:
stats = self._wanpipeInfo.getPRIstats(iface)
except:
stats = None
if stats:
graph_name = 'wanpipe_pri_errors_%s' % iface
if self.hasGraph(graph_name):
for field in ('linecodeviolation',
'farendblockerrors',
'crc4errors', 'faserrors'):
self.setGraphVal(graph_name, field,
stats.get(field))
if self.hasGraph('wanpipe_rxlevel'):
self.setGraphVal('wanpipe_rxlevel',
iface, stats.get('rxlevel'))
|
Retrieve values for graphs.
|
entailment
|
def simulate_list(nwords=16, nrec=10, ncats=4):
"""A function to simulate a list"""
# load wordpool
wp = pd.read_csv('data/cut_wordpool.csv')
# get one list
wp = wp[wp['GROUP']==np.random.choice(list(range(16)), 1)[0]].sample(16)
wp['COLOR'] = [[int(np.random.rand() * 255) for i in range(3)] for i in range(16)]
# hand back the simulated list
return wp
|
A function to simulate a list
|
entailment
|
def retrieveVals(self):
"""Retrieve values for graphs."""
if self._genStats is None:
self._genStats = self._dbconn.getStats()
if self._genVars is None:
self._genVars = self._dbconn.getParams()
if self.hasGraph('mysql_connections'):
self.setGraphVal('mysql_connections', 'conn',
self._genStats.get('Connections'))
self.setGraphVal('mysql_connections', 'abort_conn',
self._genStats.get('Aborted_connects'))
self.setGraphVal('mysql_connections', 'abort_client',
self._genStats.get('Aborted_clients'))
if self.hasGraph('mysql_traffic'):
self.setGraphVal('mysql_traffic', 'rx',
self._genStats.get('Bytes_received'))
self.setGraphVal('mysql_traffic', 'tx',
self._genStats.get('Bytes_sent'))
if self.graphEnabled('mysql_slowqueries'):
self.setGraphVal('mysql_slowqueries', 'queries',
self._genStats.get('Slow_queries'))
if self.hasGraph('mysql_rowmodifications'):
self.setGraphVal('mysql_rowmodifications', 'insert',
self._genStats.get('Handler_write'))
self.setGraphVal('mysql_rowmodifications', 'update',
self._genStats.get('Handler_update'))
self.setGraphVal('mysql_rowmodifications', 'delete',
self._genStats.get('Handler_delete'))
if self.hasGraph('mysql_rowreads'):
for field in self.getGraphFieldList('mysql_rowreads'):
self.setGraphVal('mysql_rowreads', field,
self._genStats.get('Handler_read_%s' % field))
if self.hasGraph('mysql_tablelocks'):
self.setGraphVal('mysql_tablelocks', 'waited',
self._genStats.get('Table_locks_waited'))
self.setGraphVal('mysql_tablelocks', 'immediate',
self._genStats.get('Table_locks_immediate'))
if self.hasGraph('mysql_threads'):
self.setGraphVal('mysql_threads', 'running',
self._genStats.get('Threads_running'))
self.setGraphVal('mysql_threads', 'idle',
self._genStats.get('Threads_connected')
- self._genStats.get('Threads_running'))
self.setGraphVal('mysql_threads', 'cached',
self._genStats.get('Threads_cached'))
self.setGraphVal('mysql_threads', 'total',
self._genStats.get('Threads_connected')
+ self._genStats.get('Threads_cached'))
if self.hasGraph('mysql_commits_rollbacks'):
self.setGraphVal('mysql_commits_rollbacks', 'commit',
self._genStats.get('Handler_commit'))
self.setGraphVal('mysql_commits_rollbacks', 'rollback',
self._genStats.get('Handler_rollback'))
if self.hasGraph('mysql_qcache_memory'):
try:
total = self._genVars['query_cache_size']
free = self._genStats['Qcache_free_memory']
used = total - free
except KeyError:
free = None
used = None
self.setGraphVal('mysql_qcache_memory', 'used', used)
self.setGraphVal('mysql_qcache_memory', 'free', free)
if self.hasGraph('mysql_qcache_hits'):
try:
hits = self._genStats['Qcache_hits']
misses = self._genStats['Com_select'] - hits
except KeyError:
hits = None
misses = None
self.setGraphVal('mysql_qcache_hits', 'hits', hits)
self.setGraphVal('mysql_qcache_hits', 'misses', misses)
if self.hasGraph('mysql_qcache_prunes'):
self.setGraphVal('mysql_qcache_prunes', 'insert',
self._genStats.get('Qcache_inserts'))
self.setGraphVal('mysql_qcache_prunes', 'prune',
self._genStats.get('Qcache_lowmem_prunes'))
if self.hasGraph('mysql_proc_status'):
self._procStatus = self._dbconn.getProcessStatus()
if self._procStatus:
stats = {}
for field in self.getGraphFieldList('mysql_proc_status'):
stats[field] = 0
for (k, v) in self._procStatus.items():
if k in stats:
stats[k] = v
else:
stats['unknown'] += v
for (k,v) in stats.items():
self.setGraphVal('mysql_proc_status', k, v)
if self.hasGraph('mysql_proc_db'):
self._procDB = self._dbconn.getProcessDatabase()
for db in self._dbList:
self.setGraphVal('mysql_proc_db', db, self._procDB.get(db, 0))
if self.engineIncluded('myisam'):
if self.hasGraph('mysql_myisam_key_buffer_util'):
try:
bsize = self._genVars['key_cache_block_size']
total = self._genVars['key_buffer_size']
free = self._genStats['Key_blocks_unused'] * bsize
dirty = self._genStats['Key_blocks_not_flushed'] * bsize
clean = total - free - dirty
except KeyError:
total = None
free = None
dirty = None
clean = None
for (field,val) in (('dirty', dirty),
('clean', clean),
('free', free),
('total', total)):
self.setGraphVal('mysql_myisam_key_buffer_util',
field, val)
if self.hasGraph('mysql_myisam_key_read_reqs'):
try:
misses = self._genStats['Key_reads']
hits = (self._genStats['Key_read_requests']
- misses)
except KeyError:
misses = None
hits = None
self.setGraphVal('mysql_myisam_key_read_reqs', 'disk', misses)
self.setGraphVal('mysql_myisam_key_read_reqs', 'buffer', hits)
if self.engineIncluded('innodb'):
if self.hasGraph('mysql_innodb_buffer_pool_util'):
self._genStats['Innodb_buffer_pool_pages_clean'] = (
self._genStats.get('Innodb_buffer_pool_pages_data')
- self._genStats.get('Innodb_buffer_pool_pages_dirty'))
page_size = int(self._genStats.get('Innodb_page_size'))
for field in ('dirty', 'clean', 'misc', 'free', 'total'):
self.setGraphVal('mysql_innodb_buffer_pool_util',
field,
self._genStats.get('Innodb_buffer_pool_pages_%s'
% field)
* page_size)
if self.hasGraph('mysql_innodb_buffer_pool_activity'):
for field in ('created', 'read', 'written'):
self.setGraphVal('mysql_innodb_buffer_pool_activity', field,
self._genStats.get('Innodb_pages_%s' % field))
if self.hasGraph('mysql_innodb_buffer_pool_read_reqs'):
try:
misses = self._genStats['Innodb_buffer_pool_reads']
hits = (self._genStats['Innodb_buffer_pool_read_requests']
- misses)
except KeyError:
misses = None
hits = None
self.setGraphVal('mysql_innodb_buffer_pool_read_reqs', 'disk',
misses)
self.setGraphVal('mysql_innodb_buffer_pool_read_reqs', 'buffer',
hits)
if self.hasGraph('mysql_innodb_row_ops'):
for field in ('inserted', 'updated', 'deleted', 'read'):
self.setGraphVal('mysql_innodb_row_ops', field,
self._genStats.get('Innodb_rows_%s' % field))
|
Retrieve values for graphs.
|
entailment
|
def engineIncluded(self, name):
"""Utility method to check if a storage engine is included in graphs.
@param name: Name of storage engine.
@return: Returns True if included in graphs, False otherwise.
"""
if self._engines is None:
self._engines = self._dbconn.getStorageEngines()
return self.envCheckFilter('engine', name) and name in self._engines
|
Utility method to check if a storage engine is included in graphs.
@param name: Name of storage engine.
@return: Returns True if included in graphs, False otherwise.
|
entailment
|