Dataset schema (column: type, observed size range):

repository_name: string, 7 to 55 characters
func_path_in_repository: string, 4 to 223 characters
func_name: string, 1 to 134 characters
whole_func_string: string, 75 to 104k characters
language: string, 1 distinct value
func_code_string: string, 75 to 104k characters
func_code_tokens: list, 19 to 28.4k items
func_documentation_string: string, 1 to 46.9k characters
func_documentation_tokens: list, 1 to 1.97k items
split_name: string, 1 distinct value
func_code_url: string, 87 to 315 characters
aouyar/PyMunin
pymunin/plugins/ntphostoffsets.py
MuninNTPhostOffsetsPlugin.autoconf
def autoconf(self):
    """Implements Munin Plugin Auto-Configuration Option.

    @return: True if plugin can be auto-configured, False otherwise.

    """
    ntpinfo = NTPinfo()
    ntpstats = ntpinfo.getHostOffsets(self._remoteHosts)
    return len(ntpstats) > 0
python
[ "def", "autoconf", "(", "self", ")", ":", "ntpinfo", "=", "NTPinfo", "(", ")", "ntpstats", "=", "ntpinfo", ".", "getHostOffsets", "(", "self", ".", "_remoteHosts", ")", "return", "len", "(", "ntpstats", ")", ">", "0" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/ntphostoffsets.py#L138-L146
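Munin's autoconf contract is that running a plugin with the single argument autoconf prints the literal string 'yes' or 'no'; the method above supplies the boolean behind that answer. A minimal dispatch sketch follows (a hypothetical wrapper for illustration, not PyMunin's actual entry-point wiring; the constructor arguments are assumed):

import sys

def run_autoconf(plugin):
    # Munin expects exactly 'yes' or 'no' on stdout for autoconf.
    print('yes' if plugin.autoconf() else 'no')

if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == 'autoconf':
        # Hypothetical construction; the real plugin class takes PyMunin's own argv handling.
        run_autoconf(MuninNTPhostOffsetsPlugin(sys.argv))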
aouyar/PyMunin
pymunin/plugins/lighttpdstats.py
MuninLighttpdPlugin.retrieveVals
def retrieveVals(self):
    """Retrieve values for graphs."""
    lighttpdInfo = LighttpdInfo(self._host, self._port,
                                self._user, self._password,
                                self._statuspath, self._ssl)
    stats = lighttpdInfo.getServerStats()
    if self.hasGraph('lighttpd_access'):
        self.setGraphVal('lighttpd_access', 'reqs',
                         stats['Total Accesses'])
    if self.hasGraph('lighttpd_bytes'):
        self.setGraphVal('lighttpd_bytes', 'bytes',
                         stats['Total kBytes'] * 1000)
    if self.hasGraph('lighttpd_servers'):
        self.setGraphVal('lighttpd_servers', 'busy', stats['BusyServers'])
        self.setGraphVal('lighttpd_servers', 'idle', stats['IdleServers'])
        self.setGraphVal('lighttpd_servers', 'max', stats['MaxServers'])
python
[ "def", "retrieveVals", "(", "self", ")", ":", "lighttpdInfo", "=", "LighttpdInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "stats", "=", "lighttpdInfo", ".", "getServerStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'lighttpd_access'", ")", ":", "self", ".", "setGraphVal", "(", "'lighttpd_access'", ",", "'reqs'", ",", "stats", "[", "'Total Accesses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'lighttpd_bytes'", ")", ":", "self", ".", "setGraphVal", "(", "'lighttpd_bytes'", ",", "'bytes'", ",", "stats", "[", "'Total kBytes'", "]", "*", "1000", ")", "if", "self", ".", "hasGraph", "(", "'lighttpd_servers'", ")", ":", "self", ".", "setGraphVal", "(", "'lighttpd_servers'", ",", "'busy'", ",", "stats", "[", "'BusyServers'", "]", ")", "self", ".", "setGraphVal", "(", "'lighttpd_servers'", ",", "'idle'", ",", "stats", "[", "'IdleServers'", "]", ")", "self", ".", "setGraphVal", "(", "'lighttpd_servers'", ",", "'max'", ",", "stats", "[", "'MaxServers'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/lighttpdstats.py#L124-L138
aouyar/PyMunin
pymunin/plugins/lighttpdstats.py
MuninLighttpdPlugin.autoconf
def autoconf(self):
    """Implements Munin Plugin Auto-Configuration Option.

    @return: True if plugin can be auto-configured, False otherwise.

    """
    lighttpdInfo = LighttpdInfo(self._host, self._port,
                                self._user, self._password,
                                self._statuspath, self._ssl)
    return lighttpdInfo is not None
python
[ "def", "autoconf", "(", "self", ")", ":", "lighttpdInfo", "=", "LighttpdInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "return", "lighttpdInfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/lighttpdstats.py#L140-L149
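Since a Python constructor either returns an instance or raises, the final lighttpdInfo is not None test above is always True when it is reached; autoconf effectively reports only that constructing LighttpdInfo did not raise. A stricter variant would probe the status page itself; this is an illustrative sketch, not the project's code:

def autoconf(self):
    """Return True only if server stats can actually be fetched."""
    try:
        lighttpdInfo = LighttpdInfo(self._host, self._port,
                                    self._user, self._password,
                                    self._statuspath, self._ssl)
        # getServerStats() is the same call retrieveVals() relies on above.
        return lighttpdInfo.getServerStats() is not None
    except Exception:
        return False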
aouyar/PyMunin
pymunin/plugins/diskusagestats.py
MuninDiskUsagePlugin.retrieveVals
def retrieveVals(self):
    """Retrieve values for graphs."""
    name = 'diskspace'
    if self.hasGraph(name):
        for fspath in self._fslist:
            if self._statsSpace.has_key(fspath):
                self.setGraphVal(name, fspath,
                                 self._statsSpace[fspath]['inuse_pcent'])
    name = 'diskinode'
    if self.hasGraph(name):
        for fspath in self._fslist:
            if self._statsInode.has_key(fspath):
                self.setGraphVal(name, fspath,
                                 self._statsInode[fspath]['inuse_pcent'])
python
[ "def", "retrieveVals", "(", "self", ")", ":", "name", "=", "'diskspace'", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "for", "fspath", "in", "self", ".", "_fslist", ":", "if", "self", ".", "_statsSpace", ".", "has_key", "(", "fspath", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "fspath", ",", "self", ".", "_statsSpace", "[", "fspath", "]", "[", "'inuse_pcent'", "]", ")", "name", "=", "'diskinode'", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "for", "fspath", "in", "self", ".", "_fslist", ":", "if", "self", ".", "_statsInode", ".", "has_key", "(", "fspath", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "fspath", ",", "self", ".", "_statsInode", "[", "fspath", "]", "[", "'inuse_pcent'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskusagestats.py#L121-L134
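The dict.has_key() calls above only run on Python 2; the method was removed in Python 3. The portable membership test uses the in operator, e.g. (illustrative rewrite of one branch, not the project's code):

if fspath in self._statsSpace:
    self.setGraphVal(name, fspath,
                     self._statsSpace[fspath]['inuse_pcent'])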
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.symbolic
def symbolic(self, mtx):
    """
    Perform symbolic object (symbolic LU decomposition) computation
    for a given sparsity pattern.
    """
    self.free_symbolic()

    indx = self._getIndx(mtx)

    if not assumeSortedIndices:
        # row/column indices cannot be assumed to be sorted
        mtx.sort_indices()

    if self.isReal:
        status, self._symbolic\
            = self.funs.symbolic(mtx.shape[0], mtx.shape[1],
                                 mtx.indptr, indx, mtx.data,
                                 self.control, self.info)
    else:
        real, imag = mtx.data.real.copy(), mtx.data.imag.copy()
        status, self._symbolic\
            = self.funs.symbolic(mtx.shape[0], mtx.shape[1],
                                 mtx.indptr, indx, real, imag,
                                 self.control, self.info)

    if status != UMFPACK_OK:
        raise RuntimeError('%s failed with %s'
                           % (self.funs.symbolic, umfStatus[status]))

    self.mtx = mtx
python
[ "def", "symbolic", "(", "self", ",", "mtx", ")", ":", "self", ".", "free_symbolic", "(", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "if", "not", "assumeSortedIndices", ":", "# row/column indices cannot be assumed to be sorted", "mtx", ".", "sort_indices", "(", ")", "if", "self", ".", "isReal", ":", "status", ",", "self", ".", "_symbolic", "=", "self", ".", "funs", ".", "symbolic", "(", "mtx", ".", "shape", "[", "0", "]", ",", "mtx", ".", "shape", "[", "1", "]", ",", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "real", ",", "imag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "status", ",", "self", ".", "_symbolic", "=", "self", ".", "funs", ".", "symbolic", "(", "mtx", ".", "shape", "[", "0", "]", ",", "mtx", ".", "shape", "[", "1", "]", ",", "mtx", ".", "indptr", ",", "indx", ",", "real", ",", "imag", ",", "self", ".", "control", ",", "self", ".", "info", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "symbolic", ",", "umfStatus", "[", "status", "]", ")", ")", "self", ".", "mtx", "=", "mtx" ]
Perform symbolic object (symbolic LU decomposition) computation for a given sparsity pattern.
[ "Perform", "symbolic", "object", "(", "symbolic", "LU", "decomposition", ")", "computation", "for", "a", "given", "sparsity", "pattern", "." ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L492-L525
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.numeric
def numeric(self, mtx):
    """
    Perform numeric object (LU decomposition) computation using the
    symbolic decomposition. The symbolic decomposition is (re)computed
    if necessary.
    """
    self.free_numeric()

    if self._symbolic is None:
        self.symbolic(mtx)

    indx = self._getIndx(mtx)

    failCount = 0
    while 1:
        if self.isReal:
            status, self._numeric\
                = self.funs.numeric(mtx.indptr, indx, mtx.data,
                                    self._symbolic,
                                    self.control, self.info)
        else:
            real, imag = mtx.data.real.copy(), mtx.data.imag.copy()
            status, self._numeric\
                = self.funs.numeric(mtx.indptr, indx, real, imag,
                                    self._symbolic,
                                    self.control, self.info)

        if status != UMFPACK_OK:
            if status == UMFPACK_WARNING_singular_matrix:
                warnings.warn('Singular matrix', UmfpackWarning)
                break
            elif status in (UMFPACK_ERROR_different_pattern,
                            UMFPACK_ERROR_invalid_Symbolic_object):
                # Try again.
                warnings.warn('Recomputing symbolic', UmfpackWarning)
                self.symbolic(mtx)
                failCount += 1
            else:
                failCount += 100
        else:
            break

        if failCount >= 2:
            raise RuntimeError('%s failed with %s'
                               % (self.funs.numeric, umfStatus[status]))
python
[ "def", "numeric", "(", "self", ",", "mtx", ")", ":", "self", ".", "free_numeric", "(", ")", "if", "self", ".", "_symbolic", "is", "None", ":", "self", ".", "symbolic", "(", "mtx", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "failCount", "=", "0", "while", "1", ":", "if", "self", ".", "isReal", ":", "status", ",", "self", ".", "_numeric", "=", "self", ".", "funs", ".", "numeric", "(", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "self", ".", "_symbolic", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "real", ",", "imag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "status", ",", "self", ".", "_numeric", "=", "self", ".", "funs", ".", "numeric", "(", "mtx", ".", "indptr", ",", "indx", ",", "real", ",", "imag", ",", "self", ".", "_symbolic", ",", "self", ".", "control", ",", "self", ".", "info", ")", "if", "status", "!=", "UMFPACK_OK", ":", "if", "status", "==", "UMFPACK_WARNING_singular_matrix", ":", "warnings", ".", "warn", "(", "'Singular matrix'", ",", "UmfpackWarning", ")", "break", "elif", "status", "in", "(", "UMFPACK_ERROR_different_pattern", ",", "UMFPACK_ERROR_invalid_Symbolic_object", ")", ":", "# Try again.", "warnings", ".", "warn", "(", "'Recomputing symbolic'", ",", "UmfpackWarning", ")", "self", ".", "symbolic", "(", "mtx", ")", "failCount", "+=", "1", "else", ":", "failCount", "+=", "100", "else", ":", "break", "if", "failCount", ">=", "2", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "numeric", ",", "umfStatus", "[", "status", "]", ")", ")" ]
Perform numeric object (LU decomposition) computation using the symbolic decomposition. The symbolic decomposition is (re)computed if necessary.
[ "Perform", "numeric", "object", "(", "LU", "decomposition", ")", "computation", "using", "the", "symbolic", "decomposition", ".", "The", "symbolic", "decomposition", "is", "(", "re", ")", "computed", "if", "necessary", "." ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L532-L577
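The symbolic/numeric split separates pattern analysis from value factorization, and numeric() recomputes the symbolic analysis itself when none is present, so the explicit symbolic() call below is optional. A usage sketch, assuming UmfpackContext is re-exported by the scikits.umfpack package and that the default context family is the real/int one:

import numpy as np
import scipy.sparse as sp
from scikits.umfpack import UmfpackContext

umf = UmfpackContext()        # assumed default: real matrix, int indices
A = sp.csc_matrix(np.random.rand(5, 5) + 5.0 * np.eye(5))
umf.symbolic(A)               # analyze the sparsity pattern
umf.numeric(A)                # LU-factorize the values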
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.free_symbolic
def free_symbolic(self):
    """Free symbolic data"""
    if self._symbolic is not None:
        self.funs.free_symbolic(self._symbolic)
        self._symbolic = None
        self.mtx = None
python
[ "def", "free_symbolic", "(", "self", ")", ":", "if", "self", ".", "_symbolic", "is", "not", "None", ":", "self", ".", "funs", ".", "free_symbolic", "(", "self", ".", "_symbolic", ")", "self", ".", "_symbolic", "=", "None", "self", ".", "mtx", "=", "None" ]
Free symbolic data
[ "Free", "symbolic", "data" ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L615-L620
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.free_numeric
def free_numeric(self):
    """Free numeric data"""
    if self._numeric is not None:
        self.funs.free_numeric(self._numeric)
        self._numeric = None
        self.free_symbolic()
python
[ "def", "free_numeric", "(", "self", ")", ":", "if", "self", ".", "_numeric", "is", "not", "None", ":", "self", ".", "funs", ".", "free_numeric", "(", "self", ".", "_numeric", ")", "self", ".", "_numeric", "=", "None", "self", ".", "free_symbolic", "(", ")" ]
Free numeric data
[ "Free", "numeric", "data" ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L625-L630
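Note that in this implementation free_numeric() also calls free_symbolic() whenever it actually frees a numeric object, so freeing is effectively all-or-nothing; the context stays reusable because numeric() redoes the symbolic analysis on demand. Continuing the sketch above:

umf.free_numeric()    # releases the numeric LU and, with it, the symbolic analysis
umf.numeric(A)        # safe: recomputes the symbolic object, then refactorizes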
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.solve
def solve(self, sys, mtx, rhs, autoTranspose=False):
    """
    Solution of system of linear equation using the Numeric object.

    Parameters
    ----------
    sys : constant
        one of UMFPACK system description constants, like
        UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs
    mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix
        Input.
    rhs : ndarray
        Right Hand Side
    autoTranspose : bool
        Automatically changes `sys` to the transposed type, if `mtx` is
        in CSR, since UMFPACK assumes CSC internally

    Returns
    -------
    sol : ndarray
        Solution to the equation system.
    """
    if sys not in umfSys:
        raise ValueError('sys must be in' % umfSys)

    if autoTranspose and self.isCSR:
        ##
        # UMFPACK uses CSC internally...
        if self.family in umfRealTypes:
            ii = 0
        else:
            ii = 1
        if sys in umfSys_transposeMap[ii]:
            sys = umfSys_transposeMap[ii][sys]
        else:
            raise RuntimeError('autoTranspose ambiguous, switch it off')

    if self._numeric is not None:
        if self.mtx is not mtx:
            raise ValueError('must be called with same matrix as numeric()')
    else:
        raise RuntimeError('numeric() not called')

    indx = self._getIndx(mtx)

    if self.isReal:
        rhs = rhs.astype(np.float64)
        sol = np.zeros((mtx.shape[1],), dtype=np.float64)
        status = self.funs.solve(sys, mtx.indptr, indx, mtx.data, sol, rhs,
                                 self._numeric, self.control, self.info)
    else:
        rhs = rhs.astype(np.complex128)
        sol = np.zeros((mtx.shape[1],), dtype=np.complex128)
        mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy()
        sreal, simag = sol.real.copy(), sol.imag.copy()
        rreal, rimag = rhs.real.copy(), rhs.imag.copy()
        status = self.funs.solve(sys, mtx.indptr, indx,
                                 mreal, mimag, sreal, simag, rreal, rimag,
                                 self._numeric, self.control, self.info)
        sol.real, sol.imag = sreal, simag

    # self.funs.report_info( self.control, self.info )
    # pause()
    if status != UMFPACK_OK:
        if status == UMFPACK_WARNING_singular_matrix:
            ## Change inf, nan to zeros.
            warnings.warn('Zeroing nan and inf entries...', UmfpackWarning)
            sol[~np.isfinite(sol)] = 0.0
        else:
            raise RuntimeError('%s failed with %s'
                               % (self.funs.solve, umfStatus[status]))
    econd = 1.0 / self.info[UMFPACK_RCOND]
    if econd > self.maxCond:
        msg = '(almost) singular matrix! '\
            + '(estimated cond. number: %.2e)' % econd
        warnings.warn(msg, UmfpackWarning)

    return sol
python
[ "def", "solve", "(", "self", ",", "sys", ",", "mtx", ",", "rhs", ",", "autoTranspose", "=", "False", ")", ":", "if", "sys", "not", "in", "umfSys", ":", "raise", "ValueError", "(", "'sys must be in'", "%", "umfSys", ")", "if", "autoTranspose", "and", "self", ".", "isCSR", ":", "##", "# UMFPACK uses CSC internally...", "if", "self", ".", "family", "in", "umfRealTypes", ":", "ii", "=", "0", "else", ":", "ii", "=", "1", "if", "sys", "in", "umfSys_transposeMap", "[", "ii", "]", ":", "sys", "=", "umfSys_transposeMap", "[", "ii", "]", "[", "sys", "]", "else", ":", "raise", "RuntimeError", "(", "'autoTranspose ambiguous, switch it off'", ")", "if", "self", ".", "_numeric", "is", "not", "None", ":", "if", "self", ".", "mtx", "is", "not", "mtx", ":", "raise", "ValueError", "(", "'must be called with same matrix as numeric()'", ")", "else", ":", "raise", "RuntimeError", "(", "'numeric() not called'", ")", "indx", "=", "self", ".", "_getIndx", "(", "mtx", ")", "if", "self", ".", "isReal", ":", "rhs", "=", "rhs", ".", "astype", "(", "np", ".", "float64", ")", "sol", "=", "np", ".", "zeros", "(", "(", "mtx", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "status", "=", "self", ".", "funs", ".", "solve", "(", "sys", ",", "mtx", ".", "indptr", ",", "indx", ",", "mtx", ".", "data", ",", "sol", ",", "rhs", ",", "self", ".", "_numeric", ",", "self", ".", "control", ",", "self", ".", "info", ")", "else", ":", "rhs", "=", "rhs", ".", "astype", "(", "np", ".", "complex128", ")", "sol", "=", "np", ".", "zeros", "(", "(", "mtx", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "mreal", ",", "mimag", "=", "mtx", ".", "data", ".", "real", ".", "copy", "(", ")", ",", "mtx", ".", "data", ".", "imag", ".", "copy", "(", ")", "sreal", ",", "simag", "=", "sol", ".", "real", ".", "copy", "(", ")", ",", "sol", ".", "imag", ".", "copy", "(", ")", "rreal", ",", "rimag", "=", "rhs", ".", "real", ".", "copy", "(", ")", ",", "rhs", ".", "imag", ".", "copy", "(", ")", "status", "=", "self", ".", "funs", ".", "solve", "(", "sys", ",", "mtx", ".", "indptr", ",", "indx", ",", "mreal", ",", "mimag", ",", "sreal", ",", "simag", ",", "rreal", ",", "rimag", ",", "self", ".", "_numeric", ",", "self", ".", "control", ",", "self", ".", "info", ")", "sol", ".", "real", ",", "sol", ".", "imag", "=", "sreal", ",", "simag", "# self.funs.report_info( self.control, self.info )", "# pause()", "if", "status", "!=", "UMFPACK_OK", ":", "if", "status", "==", "UMFPACK_WARNING_singular_matrix", ":", "## Change inf, nan to zeros.", "warnings", ".", "warn", "(", "'Zeroing nan and inf entries...'", ",", "UmfpackWarning", ")", "sol", "[", "~", "np", ".", "isfinite", "(", "sol", ")", "]", "=", "0.0", "else", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "solve", ",", "umfStatus", "[", "status", "]", ")", ")", "econd", "=", "1.0", "/", "self", ".", "info", "[", "UMFPACK_RCOND", "]", "if", "econd", ">", "self", ".", "maxCond", ":", "msg", "=", "'(almost) singular matrix! '", "+", "'(estimated cond. number: %.2e)'", "%", "econd", "warnings", ".", "warn", "(", "msg", ",", "UmfpackWarning", ")", "return", "sol" ]
Solution of system of linear equation using the Numeric object.

Parameters
----------
sys : constant
    one of UMFPACK system description constants, like
    UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs
mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix
    Input.
rhs : ndarray
    Right Hand Side
autoTranspose : bool
    Automatically changes `sys` to the transposed type, if `mtx` is
    in CSR, since UMFPACK assumes CSC internally

Returns
-------
sol : ndarray
    Solution to the equation system.
[ "Solution", "of", "system", "of", "linear", "equation", "using", "the", "Numeric", "object", "." ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L645-L723
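A solve sketch for the common system A x = b, assuming the module-level UMFPACK_A constant is importable alongside UmfpackContext; note that solve() raises unless numeric() ran first, and it insists (via an identity check) on the very same matrix object that was factorized:

import numpy as np
import scipy.sparse as sp
from scikits.umfpack import UmfpackContext, UMFPACK_A

umf = UmfpackContext()
A = sp.csc_matrix(np.array([[4.0, 1.0],
                            [1.0, 3.0]]))
b = np.array([1.0, 2.0])
umf.numeric(A)                    # required before solve()
x = umf.solve(UMFPACK_A, A, b)    # must pass the same object given to numeric()
print(np.allclose(A @ x, b))      # expected: True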
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.linsolve
def linsolve(self, sys, mtx, rhs, autoTranspose=False):
    """
    One-shot solution of system of linear equation. Reuses Numeric object
    if possible.

    Parameters
    ----------
    sys : constant
        one of UMFPACK system description constants, like
        UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs
    mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix
        Input.
    rhs : ndarray
        Right Hand Side
    autoTranspose : bool
        Automatically changes `sys` to the transposed type, if `mtx` is
        in CSR, since UMFPACK assumes CSC internally

    Returns
    -------
    sol : ndarray
        Solution to the equation system.
    """
    if sys not in umfSys:
        raise ValueError('sys must be in' % umfSys)

    if self._numeric is None:
        self.numeric(mtx)
    else:
        if self.mtx is not mtx:
            self.numeric(mtx)

    sol = self.solve(sys, mtx, rhs, autoTranspose)
    self.free_numeric()

    return sol
python
[ "def", "linsolve", "(", "self", ",", "sys", ",", "mtx", ",", "rhs", ",", "autoTranspose", "=", "False", ")", ":", "if", "sys", "not", "in", "umfSys", ":", "raise", "ValueError", "(", "'sys must be in'", "%", "umfSys", ")", "if", "self", ".", "_numeric", "is", "None", ":", "self", ".", "numeric", "(", "mtx", ")", "else", ":", "if", "self", ".", "mtx", "is", "not", "mtx", ":", "self", ".", "numeric", "(", "mtx", ")", "sol", "=", "self", ".", "solve", "(", "sys", ",", "mtx", ",", "rhs", ",", "autoTranspose", ")", "self", ".", "free_numeric", "(", ")", "return", "sol" ]
One-shot solution of system of linear equation. Reuses Numeric object
if possible.

Parameters
----------
sys : constant
    one of UMFPACK system description constants, like
    UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs
mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix
    Input.
rhs : ndarray
    Right Hand Side
autoTranspose : bool
    Automatically changes `sys` to the transposed type, if `mtx` is
    in CSR, since UMFPACK assumes CSC internally

Returns
-------
sol : ndarray
    Solution to the equation system.
[ "One", "-", "shot", "solution", "of", "system", "of", "linear", "equation", ".", "Reuses", "Numeric", "object", "if", "possible", "." ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L728-L764
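linsolve() wraps the numeric/solve/free cycle in one call; because it frees the numeric object on the way out, repeated right-hand sides against one matrix are cheaper with numeric() plus several solve() calls. A one-shot sketch under the same import assumptions as above:

import numpy as np
import scipy.sparse as sp
from scikits.umfpack import UmfpackContext, UMFPACK_A

A = sp.csc_matrix(np.array([[4.0, 1.0], [1.0, 3.0]]))
b = np.array([1.0, 2.0])
x = UmfpackContext().linsolve(UMFPACK_A, A, b)   # factorize, solve, free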
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.lu
def lu(self, mtx):
    """
    Perform LU decomposition.

    For a given matrix A, the decomposition satisfies::

        LU = PRAQ            when do_recip is true
        LU = P(R^-1)AQ       when do_recip is false

    Parameters
    ----------
    mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix
        Input.

    Returns
    -------
    L : csr_matrix
        Lower triangular m-by-min(m,n) CSR matrix
    U : csc_matrix
        Upper triangular min(m,n)-by-n CSC matrix
    P : ndarray
        Vector of row permutations
    Q : ndarray
        Vector of column permutations
    R : ndarray
        Vector of diagonal row scalings
    do_recip : bool
        Whether R is R^-1 or R
    """
    # this should probably be changed
    mtx = mtx.tocsc()
    self.numeric(mtx)

    # first find out how much space to reserve
    (status, lnz, unz, n_row, n_col, nz_udiag)\
        = self.funs.get_lunz(self._numeric)

    if status != UMFPACK_OK:
        raise RuntimeError('%s failed with %s'
                           % (self.funs.get_lunz, umfStatus[status]))

    # allocate storage for decomposition data
    i_type = mtx.indptr.dtype

    Lp = np.zeros((n_row+1,), dtype=i_type)
    Lj = np.zeros((lnz,), dtype=i_type)
    Lx = np.zeros((lnz,), dtype=np.double)

    Up = np.zeros((n_col+1,), dtype=i_type)
    Ui = np.zeros((unz,), dtype=i_type)
    Ux = np.zeros((unz,), dtype=np.double)

    P = np.zeros((n_row,), dtype=i_type)
    Q = np.zeros((n_col,), dtype=i_type)

    Dx = np.zeros((min(n_row,n_col),), dtype=np.double)

    Rs = np.zeros((n_row,), dtype=np.double)

    if self.isReal:
        (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Up,Ui,Ux,
                                                  P,Q,Dx,Rs,
                                                  self._numeric)
        if status != UMFPACK_OK:
            raise RuntimeError('%s failed with %s'
                               % (self.funs.get_numeric, umfStatus[status]))

        L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col)))
        U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col))
        R = Rs

        return (L,U,P,Q,R,bool(do_recip))
    else:
        # allocate additional storage for imaginary parts
        Lz = np.zeros((lnz,), dtype=np.double)
        Uz = np.zeros((unz,), dtype=np.double)
        Dz = np.zeros((min(n_row,n_col),), dtype=np.double)

        (status,do_recip) = self.funs.get_numeric(Lp,Lj,Lx,Lz,Up,Ui,Ux,Uz,
                                                  P,Q,Dx,Dz,Rs,
                                                  self._numeric)
        if status != UMFPACK_OK:
            raise RuntimeError('%s failed with %s'
                               % (self.funs.get_numeric, umfStatus[status]))

        Lxz = np.zeros((lnz,), dtype=np.complex128)
        Uxz = np.zeros((unz,), dtype=np.complex128)
        Dxz = np.zeros((min(n_row,n_col),), dtype=np.complex128)

        Lxz.real,Lxz.imag = Lx,Lz
        Uxz.real,Uxz.imag = Ux,Uz
        Dxz.real,Dxz.imag = Dx,Dz

        L = sp.csr_matrix((Lxz,Lj,Lp),(n_row,min(n_row,n_col)))
        U = sp.csc_matrix((Uxz,Ui,Up),(min(n_row,n_col),n_col))
        R = Rs

        return (L,U,P,Q,R,bool(do_recip))
python
[ "def", "lu", "(", "self", ",", "mtx", ")", ":", "# this should probably be changed", "mtx", "=", "mtx", ".", "tocsc", "(", ")", "self", ".", "numeric", "(", "mtx", ")", "# first find out how much space to reserve", "(", "status", ",", "lnz", ",", "unz", ",", "n_row", ",", "n_col", ",", "nz_udiag", ")", "=", "self", ".", "funs", ".", "get_lunz", "(", "self", ".", "_numeric", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "get_lunz", ",", "umfStatus", "[", "status", "]", ")", ")", "# allocate storage for decomposition data", "i_type", "=", "mtx", ".", "indptr", ".", "dtype", "Lp", "=", "np", ".", "zeros", "(", "(", "n_row", "+", "1", ",", ")", ",", "dtype", "=", "i_type", ")", "Lj", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "i_type", ")", "Lx", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Up", "=", "np", ".", "zeros", "(", "(", "n_col", "+", "1", ",", ")", ",", "dtype", "=", "i_type", ")", "Ui", "=", "np", ".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "i_type", ")", "Ux", "=", "np", ".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "P", "=", "np", ".", "zeros", "(", "(", "n_row", ",", ")", ",", "dtype", "=", "i_type", ")", "Q", "=", "np", ".", "zeros", "(", "(", "n_col", ",", ")", ",", "dtype", "=", "i_type", ")", "Dx", "=", "np", ".", "zeros", "(", "(", "min", "(", "n_row", ",", "n_col", ")", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Rs", "=", "np", ".", "zeros", "(", "(", "n_row", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "if", "self", ".", "isReal", ":", "(", "status", ",", "do_recip", ")", "=", "self", ".", "funs", ".", "get_numeric", "(", "Lp", ",", "Lj", ",", "Lx", ",", "Up", ",", "Ui", ",", "Ux", ",", "P", ",", "Q", ",", "Dx", ",", "Rs", ",", "self", ".", "_numeric", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "get_numeric", ",", "umfStatus", "[", "status", "]", ")", ")", "L", "=", "sp", ".", "csr_matrix", "(", "(", "Lx", ",", "Lj", ",", "Lp", ")", ",", "(", "n_row", ",", "min", "(", "n_row", ",", "n_col", ")", ")", ")", "U", "=", "sp", ".", "csc_matrix", "(", "(", "Ux", ",", "Ui", ",", "Up", ")", ",", "(", "min", "(", "n_row", ",", "n_col", ")", ",", "n_col", ")", ")", "R", "=", "Rs", "return", "(", "L", ",", "U", ",", "P", ",", "Q", ",", "R", ",", "bool", "(", "do_recip", ")", ")", "else", ":", "# allocate additional storage for imaginary parts", "Lz", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Uz", "=", "np", ".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "Dz", "=", "np", ".", "zeros", "(", "(", "min", "(", "n_row", ",", "n_col", ")", ",", ")", ",", "dtype", "=", "np", ".", "double", ")", "(", "status", ",", "do_recip", ")", "=", "self", ".", "funs", ".", "get_numeric", "(", "Lp", ",", "Lj", ",", "Lx", ",", "Lz", ",", "Up", ",", "Ui", ",", "Ux", ",", "Uz", ",", "P", ",", "Q", ",", "Dx", ",", "Dz", ",", "Rs", ",", "self", ".", "_numeric", ")", "if", "status", "!=", "UMFPACK_OK", ":", "raise", "RuntimeError", "(", "'%s failed with %s'", "%", "(", "self", ".", "funs", ".", "get_numeric", ",", "umfStatus", "[", "status", "]", ")", ")", "Lxz", "=", "np", ".", "zeros", "(", "(", "lnz", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "Uxz", "=", "np", 
".", "zeros", "(", "(", "unz", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "Dxz", "=", "np", ".", "zeros", "(", "(", "min", "(", "n_row", ",", "n_col", ")", ",", ")", ",", "dtype", "=", "np", ".", "complex128", ")", "Lxz", ".", "real", ",", "Lxz", ".", "imag", "=", "Lx", ",", "Lz", "Uxz", ".", "real", ",", "Uxz", ".", "imag", "=", "Ux", ",", "Uz", "Dxz", ".", "real", ",", "Dxz", ".", "imag", "=", "Dx", ",", "Dz", "L", "=", "sp", ".", "csr_matrix", "(", "(", "Lxz", ",", "Lj", ",", "Lp", ")", ",", "(", "n_row", ",", "min", "(", "n_row", ",", "n_col", ")", ")", ")", "U", "=", "sp", ".", "csc_matrix", "(", "(", "Uxz", ",", "Ui", ",", "Up", ")", ",", "(", "min", "(", "n_row", ",", "n_col", ")", ",", "n_col", ")", ")", "R", "=", "Rs", "return", "(", "L", ",", "U", ",", "P", ",", "Q", ",", "R", ",", "bool", "(", "do_recip", ")", ")" ]
Perform LU decomposition.

For a given matrix A, the decomposition satisfies::

    LU = PRAQ            when do_recip is true
    LU = P(R^-1)AQ       when do_recip is false

Parameters
----------
mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix
    Input.

Returns
-------
L : csr_matrix
    Lower triangular m-by-min(m,n) CSR matrix
U : csc_matrix
    Upper triangular min(m,n)-by-n CSC matrix
P : ndarray
    Vector of row permutations
Q : ndarray
    Vector of column permutations
R : ndarray
    Vector of diagonal row scalings
do_recip : bool
    Whether R is R^-1 or R
[ "Perform", "LU", "decomposition", "." ]
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L801-L903
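A call sketch for the factorization above (same assumed imports); the return values are the pieces of the identity stated in the docstring:

import numpy as np
import scipy.sparse as sp
from scikits.umfpack import UmfpackContext

A = sp.csc_matrix(np.random.rand(4, 4) + 4.0 * np.eye(4))
L, U, P, Q, R, do_recip = UmfpackContext().lu(A)
# L is (m, min(m, n)) CSR, U is (min(m, n), n) CSC; P and Q are row/column
# permutation vectors, and R holds the diagonal row scalings such that
# LU = PRAQ (or P(R^-1)AQ when do_recip is False), per the docstring.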
ContextLab/quail
quail/fingerprint.py
order_stick
def order_stick(presenter, egg, dist_dict, strategy, fingerprint):
    """ Reorders a list according to strategy """

    def compute_feature_stick(features, weights, alpha):
        '''create a 'stick' of feature weights'''
        feature_stick = []
        for f, w in zip(features, weights):
            feature_stick+=[f]*int(np.power(w,alpha)*100)
        return feature_stick

    def reorder_list(egg, feature_stick, dist_dict, tau):

        def compute_stimulus_stick(s, tau):
            '''create a 'stick' of feature weights'''
            feature_stick = [[weights[feature]]*round(weights[feature]**alpha)*100 for feature in w]
            return [item for sublist in feature_stick for item in sublist]

        # parse egg
        pres, rec, features, dist_funcs = parse_egg(egg)

        # turn pres and features into np arrays
        pres_arr = np.array(pres)
        features_arr = np.array(features)

        # starting with a random word
        reordered_list = []
        reordered_features = []

        # start with a random choice
        idx = np.random.choice(len(pres), 1)[0]

        # original inds
        inds = list(range(len(pres)))

        # keep track of the indices
        inds_used = [idx]

        # get the word
        current_word = pres[idx]

        # get the features dict
        current_features = features[idx]

        # append that word to the reordered list
        reordered_list.append(current_word)

        # append the features to the reordered list
        reordered_features.append(current_features)

        # loop over the word list
        for i in range(len(pres)-1):

            # sample from the stick
            feature_sample = feature_stick[np.random.choice(len(feature_stick), 1)[0]]

            # indices left
            inds_left = [ind for ind in inds if ind not in inds_used]

            # make a copy of the words filtering out the already used ones
            words_left = pres[inds_left]

            # get word distances for the word
            dists_left = np.array([dist_dict[current_word][word][feature_sample] for word in words_left])

            # features left
            features_left = features[inds_left]

            # normalize distances
            dists_left_max = np.max(dists_left)

            if dists_left_max>0:
                dists_left_norm = dists_left/np.max(dists_left)
            else:
                dists_left_norm = dists_left

            # get the min
            dists_left_min = np.min(-dists_left_norm)

            # invert the word distances to turn distance->similarity
            dists_left_inv = - dists_left_norm - dists_left_min + .01

            # create a word stick
            words_stick = []
            for word, dist in zip(words_left, dists_left_inv):
                words_stick+=[word]*int(np.power(dist,tau)*100)

            next_word = np.random.choice(words_stick)

            next_word_idx = np.where(pres==next_word)[0]

            inds_used.append(next_word_idx)

            reordered_list.append(next_word)
            reordered_features.append(features[next_word_idx][0])

        return Egg(pres=[reordered_list], rec=[reordered_list],
                   features=[[reordered_features]], dist_funcs=dist_funcs)

    # parse egg
    pres, rec, features, dist_funcs = parse_egg(egg)

    # get params needed for list reordering
    features = presenter.get_params('fingerprint').get_features()
    alpha = presenter.get_params('alpha')
    tau = presenter.get_params('tau')
    weights = fingerprint

    # invert the weights if strategy is destabilize
    if strategy=='destabilize':
        weights = 1 - weights

    # compute feature stick
    feature_stick = compute_feature_stick(features, weights, alpha)

    # reorder list
    return reorder_list(egg, feature_stick, dist_dict, tau)
python
[ "def", "order_stick", "(", "presenter", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "fingerprint", ")", ":", "def", "compute_feature_stick", "(", "features", ",", "weights", ",", "alpha", ")", ":", "'''create a 'stick' of feature weights'''", "feature_stick", "=", "[", "]", "for", "f", ",", "w", "in", "zip", "(", "features", ",", "weights", ")", ":", "feature_stick", "+=", "[", "f", "]", "*", "int", "(", "np", ".", "power", "(", "w", ",", "alpha", ")", "*", "100", ")", "return", "feature_stick", "def", "reorder_list", "(", "egg", ",", "feature_stick", ",", "dist_dict", ",", "tau", ")", ":", "def", "compute_stimulus_stick", "(", "s", ",", "tau", ")", ":", "'''create a 'stick' of feature weights'''", "feature_stick", "=", "[", "[", "weights", "[", "feature", "]", "]", "*", "round", "(", "weights", "[", "feature", "]", "**", "alpha", ")", "*", "100", "for", "feature", "in", "w", "]", "return", "[", "item", "for", "sublist", "in", "feature_stick", "for", "item", "in", "sublist", "]", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "# turn pres and features into np arrays", "pres_arr", "=", "np", ".", "array", "(", "pres", ")", "features_arr", "=", "np", ".", "array", "(", "features", ")", "# starting with a random word", "reordered_list", "=", "[", "]", "reordered_features", "=", "[", "]", "# start with a random choice", "idx", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "pres", ")", ",", "1", ")", "[", "0", "]", "# original inds", "inds", "=", "list", "(", "range", "(", "len", "(", "pres", ")", ")", ")", "# keep track of the indices", "inds_used", "=", "[", "idx", "]", "# get the word", "current_word", "=", "pres", "[", "idx", "]", "# get the features dict", "current_features", "=", "features", "[", "idx", "]", "# append that word to the reordered list", "reordered_list", ".", "append", "(", "current_word", ")", "# append the features to the reordered list", "reordered_features", ".", "append", "(", "current_features", ")", "# loop over the word list", "for", "i", "in", "range", "(", "len", "(", "pres", ")", "-", "1", ")", ":", "# sample from the stick", "feature_sample", "=", "feature_stick", "[", "np", ".", "random", ".", "choice", "(", "len", "(", "feature_stick", ")", ",", "1", ")", "[", "0", "]", "]", "# indices left", "inds_left", "=", "[", "ind", "for", "ind", "in", "inds", "if", "ind", "not", "in", "inds_used", "]", "# make a copy of the words filtering out the already used ones", "words_left", "=", "pres", "[", "inds_left", "]", "# get word distances for the word", "dists_left", "=", "np", ".", "array", "(", "[", "dist_dict", "[", "current_word", "]", "[", "word", "]", "[", "feature_sample", "]", "for", "word", "in", "words_left", "]", ")", "# features left", "features_left", "=", "features", "[", "inds_left", "]", "# normalize distances", "dists_left_max", "=", "np", ".", "max", "(", "dists_left", ")", "if", "dists_left_max", ">", "0", ":", "dists_left_norm", "=", "dists_left", "/", "np", ".", "max", "(", "dists_left", ")", "else", ":", "dists_left_norm", "=", "dists_left", "# get the min", "dists_left_min", "=", "np", ".", "min", "(", "-", "dists_left_norm", ")", "# invert the word distances to turn distance->similarity", "dists_left_inv", "=", "-", "dists_left_norm", "-", "dists_left_min", "+", ".01", "# create a word stick", "words_stick", "=", "[", "]", "for", "word", ",", "dist", "in", "zip", "(", "words_left", ",", "dists_left_inv", ")", ":", "words_stick", "+=", "[", "word", "]", "*", "int", 
"(", "np", ".", "power", "(", "dist", ",", "tau", ")", "*", "100", ")", "next_word", "=", "np", ".", "random", ".", "choice", "(", "words_stick", ")", "next_word_idx", "=", "np", ".", "where", "(", "pres", "==", "next_word", ")", "[", "0", "]", "inds_used", ".", "append", "(", "next_word_idx", ")", "reordered_list", ".", "append", "(", "next_word", ")", "reordered_features", ".", "append", "(", "features", "[", "next_word_idx", "]", "[", "0", "]", ")", "return", "Egg", "(", "pres", "=", "[", "reordered_list", "]", ",", "rec", "=", "[", "reordered_list", "]", ",", "features", "=", "[", "[", "reordered_features", "]", "]", ",", "dist_funcs", "=", "dist_funcs", ")", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "# get params needed for list reordering", "features", "=", "presenter", ".", "get_params", "(", "'fingerprint'", ")", ".", "get_features", "(", ")", "alpha", "=", "presenter", ".", "get_params", "(", "'alpha'", ")", "tau", "=", "presenter", ".", "get_params", "(", "'tau'", ")", "weights", "=", "fingerprint", "# invert the weights if strategy is destabilize", "if", "strategy", "==", "'destabilize'", ":", "weights", "=", "1", "-", "weights", "# compute feature stick", "feature_stick", "=", "compute_feature_stick", "(", "features", ",", "weights", ",", "alpha", ")", "# reorder list", "return", "reorder_list", "(", "egg", ",", "feature_stick", ",", "dist_dict", ",", "tau", ")" ]
Reorders a list according to strategy
[ "Reorders", "a", "list", "according", "to", "strategy" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L346-L464
ContextLab/quail
quail/fingerprint.py
stick_perm
def stick_perm(presenter, egg, dist_dict, strategy):
    """Computes weights for one reordering using stick-breaking method"""

    # seed RNG
    np.random.seed()

    # unpack egg
    egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg)

    # reorder
    regg = order_stick(presenter, egg, dist_dict, strategy)

    # unpack regg
    regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg)

    # # get the order
    regg_pres = list(regg_pres)
    egg_pres = list(egg_pres)
    idx = [egg_pres.index(r) for r in regg_pres]

    # compute weights
    weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres),
                                           list(regg_features), dist_dict)

    # save out the order
    orders = idx

    return weights, orders
python
[ "def", "stick_perm", "(", "presenter", ",", "egg", ",", "dist_dict", ",", "strategy", ")", ":", "# seed RNG", "np", ".", "random", ".", "seed", "(", ")", "# unpack egg", "egg_pres", ",", "egg_rec", ",", "egg_features", ",", "egg_dist_funcs", "=", "parse_egg", "(", "egg", ")", "# reorder", "regg", "=", "order_stick", "(", "presenter", ",", "egg", ",", "dist_dict", ",", "strategy", ")", "# unpack regg", "regg_pres", ",", "regg_rec", ",", "regg_features", ",", "regg_dist_funcs", "=", "parse_egg", "(", "regg", ")", "# # get the order", "regg_pres", "=", "list", "(", "regg_pres", ")", "egg_pres", "=", "list", "(", "egg_pres", ")", "idx", "=", "[", "egg_pres", ".", "index", "(", "r", ")", "for", "r", "in", "regg_pres", "]", "# compute weights", "weights", "=", "compute_feature_weights_dict", "(", "list", "(", "regg_pres", ")", ",", "list", "(", "regg_pres", ")", ",", "list", "(", "regg_features", ")", ",", "dist_dict", ")", "# save out the order", "orders", "=", "idx", "return", "weights", ",", "orders" ]
Computes weights for one reordering using stick-breaking method
[ "Computes", "weights", "for", "one", "reordering", "using", "stick", "-", "breaking", "method" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L577-L603
ContextLab/quail
quail/fingerprint.py
compute_distances_dict
def compute_distances_dict(egg):
    """ Creates a nested dict of distances """

    pres, rec, features, dist_funcs = parse_egg(egg)

    pres_list = list(pres)
    features_list = list(features)

    # initialize dist dict
    distances = {}

    # for each word in the list
    for idx1, item1 in enumerate(pres_list):

        distances[item1]={}

        # for each word in the list
        for idx2, item2 in enumerate(pres_list):

            distances[item1][item2]={}

            # for each feature in dist_funcs
            for feature in dist_funcs:
                distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](features_list[idx1][feature],features_list[idx2][feature])

    return distances
python
[ "def", "compute_distances_dict", "(", "egg", ")", ":", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "pres_list", "=", "list", "(", "pres", ")", "features_list", "=", "list", "(", "features", ")", "# initialize dist dict", "distances", "=", "{", "}", "# for each word in the list", "for", "idx1", ",", "item1", "in", "enumerate", "(", "pres_list", ")", ":", "distances", "[", "item1", "]", "=", "{", "}", "# for each word in the list", "for", "idx2", ",", "item2", "in", "enumerate", "(", "pres_list", ")", ":", "distances", "[", "item1", "]", "[", "item2", "]", "=", "{", "}", "# for each feature in dist_funcs", "for", "feature", "in", "dist_funcs", ":", "distances", "[", "item1", "]", "[", "item2", "]", "[", "feature", "]", "=", "builtin_dist_funcs", "[", "dist_funcs", "[", "feature", "]", "]", "(", "features_list", "[", "idx1", "]", "[", "feature", "]", ",", "features_list", "[", "idx2", "]", "[", "feature", "]", ")", "return", "distances" ]
Creates a nested dict of distances
[ "Creates", "a", "nested", "dict", "of", "distances" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L637-L660
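The returned structure is indexed as distances[item1][item2][feature]. A toy, self-contained sketch of the same shape (parse_egg and builtin_dist_funcs are quail internals, so the distance function is inlined here):

words = ['dog', 'cat']
feats = [{'length': 3}, {'length': 3}]
# nested dict: outer key word 1, inner key word 2, innermost key feature name
distances = {w1: {w2: {'length': abs(f1['length'] - f2['length'])}
                  for w2, f2 in zip(words, feats)}
             for w1, f1 in zip(words, feats)}
# distances['dog']['cat']['length'] -> 0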
ContextLab/quail
quail/fingerprint.py
compute_feature_weights_dict
def compute_feature_weights_dict(pres_list, rec_list, feature_list, dist_dict):
    """
    Compute clustering scores along a set of feature dimensions

    Parameters
    ----------
    pres_list : list
        list of presented words
    rec_list : list
        list of recalled words
    feature_list : list
        list of feature dicts for presented words
    distances : dict
        dict of distance matrices for each feature

    Returns
    ----------
    weights : list
        list of clustering scores for each feature dimension
    """

    # initialize the weights object for just this list
    weights = {}
    for feature in feature_list[0]:
        weights[feature] = []

    # return default list if there is not enough data to compute the fingerprint
    if len(rec_list) < 2:
        print('Not enough recalls to compute fingerprint, returning default fingerprint.. (everything is .5)')
        for feature in feature_list[0]:
            weights[feature] = .5
        return [weights[key] for key in weights]

    # initialize past word list
    past_words = []
    past_idxs = []

    # loop over words
    for i in range(len(rec_list)-1):

        # grab current word
        c = rec_list[i]

        # grab the next word
        n = rec_list[i + 1]

        # if both recalled words are in the encoding list and haven't been recalled before
        if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words):

            # for each feature
            for feature in feature_list[0]:

                # get the distance vector for the current word
                # dists = [dist_dict[c][j][feature] for j in dist_dict[c]]

                # distance between current and next word
                c_dist = dist_dict[c][n][feature]

                # filter dists removing the words that have already been recalled
                # dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs])
                dists_filt = [dist_dict[c][j][feature] for j in dist_dict[c] if j not in past_words]

                # get indices
                avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == c_dist)[0]+1)

                # compute the weight
                weights[feature].append(avg_rank / len(dists_filt))

            # keep track of what has been recalled already
            past_idxs.append(pres_list.index(c))
            past_words.append(c)

    # average over the cluster scores for a particular dimension
    for feature in weights:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            weights[feature] = np.nanmean(weights[feature])

    return [weights[key] for key in weights]
python
def compute_feature_weights_dict(pres_list, rec_list, feature_list, dist_dict): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) < 2: print('Not enough recalls to compute fingerprint, returning default fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word # dists = [dist_dict[c][j][feature] for j in dist_dict[c]] # distance between current and next word c_dist = dist_dict[c][n][feature] # filter dists removing the words that have already been recalled # dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) dists_filt = [dist_dict[c][j][feature] for j in dist_dict[c] if j not in past_words] # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == c_dist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
[ "def", "compute_feature_weights_dict", "(", "pres_list", ",", "rec_list", ",", "feature_list", ",", "dist_dict", ")", ":", "# initialize the weights object for just this list", "weights", "=", "{", "}", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", "[", "]", "# return default list if there is not enough data to compute the fingerprint", "if", "len", "(", "rec_list", ")", "<", "2", ":", "print", "(", "'Not enough recalls to compute fingerprint, returning default fingerprint.. (everything is .5)'", ")", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", ".5", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]", "# initialize past word list", "past_words", "=", "[", "]", "past_idxs", "=", "[", "]", "# loop over words", "for", "i", "in", "range", "(", "len", "(", "rec_list", ")", "-", "1", ")", ":", "# grab current word", "c", "=", "rec_list", "[", "i", "]", "# grab the next word", "n", "=", "rec_list", "[", "i", "+", "1", "]", "# if both recalled words are in the encoding list and haven't been recalled before", "if", "(", "c", "in", "pres_list", "and", "n", "in", "pres_list", ")", "and", "(", "c", "not", "in", "past_words", "and", "n", "not", "in", "past_words", ")", ":", "# for each feature", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "# get the distance vector for the current word", "# dists = [dist_dict[c][j][feature] for j in dist_dict[c]]", "# distance between current and next word", "c_dist", "=", "dist_dict", "[", "c", "]", "[", "n", "]", "[", "feature", "]", "# filter dists removing the words that have already been recalled", "# dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs])", "dists_filt", "=", "[", "dist_dict", "[", "c", "]", "[", "j", "]", "[", "feature", "]", "for", "j", "in", "dist_dict", "[", "c", "]", "if", "j", "not", "in", "past_words", "]", "# get indices", "avg_rank", "=", "np", ".", "mean", "(", "np", ".", "where", "(", "np", ".", "sort", "(", "dists_filt", ")", "[", ":", ":", "-", "1", "]", "==", "c_dist", ")", "[", "0", "]", "+", "1", ")", "# compute the weight", "weights", "[", "feature", "]", ".", "append", "(", "avg_rank", "/", "len", "(", "dists_filt", ")", ")", "# keep track of what has been recalled already", "past_idxs", ".", "append", "(", "pres_list", ".", "index", "(", "c", ")", ")", "past_words", ".", "append", "(", "c", ")", "# average over the cluster scores for a particular dimension", "for", "feature", "in", "weights", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "weights", "[", "feature", "]", "=", "np", ".", "nanmean", "(", "weights", "[", "feature", "]", ")", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]" ]
Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension
[ "Compute", "clustering", "scores", "along", "a", "set", "of", "feature", "dimensions" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L662-L739
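A minimal sketch of the rank-based weight computed above for a single recall transition, with hypothetical distances; recalling the closest available word yields a weight of 1, recalling the farthest yields a weight near 1/n:

import numpy as np

# hypothetical distances from the just-recalled word to the not-yet-recalled words
dists = {'dog': 1.0, 'fox': 4.0, 'owl': 9.0}
next_word = 'dog'   # the word actually recalled next

c_dist = dists[next_word]
dists_filt = list(dists.values())

# 1-based rank of the transition distance within the descending-sorted distances
avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == c_dist)[0] + 1)
print(avg_rank / len(dists_filt))  # 1.0 -> the closest available word was recalled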
ContextLab/quail
quail/fingerprint.py
Fingerprint.update
def update(self, egg, permute=False, nperms=1000, parallel=False): """ In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None """ # increment n self.n+=1 next_weights = np.nanmean(_analyze_chunk(egg, analysis=fingerprint_helper, analysis_type='fingerprint', pass_features=True, permute=permute, n_perms=nperms, parallel=parallel).values, 0) if self.state is not None: # multiply states by n c = self.state*self.n # update state self.state = np.nansum(np.array([c, next_weights]), axis=0)/(self.n+1) else: self.state = next_weights # update the history self.history.append(next_weights)
python
def update(self, egg, permute=False, nperms=1000, parallel=False): """ In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None """ # increment n self.n+=1 next_weights = np.nanmean(_analyze_chunk(egg, analysis=fingerprint_helper, analysis_type='fingerprint', pass_features=True, permute=permute, n_perms=nperms, parallel=parallel).values, 0) if self.state is not None: # multiply states by n c = self.state*self.n # update state self.state = np.nansum(np.array([c, next_weights]), axis=0)/(self.n+1) else: self.state = next_weights # update the history self.history.append(next_weights)
[ "def", "update", "(", "self", ",", "egg", ",", "permute", "=", "False", ",", "nperms", "=", "1000", ",", "parallel", "=", "False", ")", ":", "# increment n", "self", ".", "n", "+=", "1", "next_weights", "=", "np", ".", "nanmean", "(", "_analyze_chunk", "(", "egg", ",", "analysis", "=", "fingerprint_helper", ",", "analysis_type", "=", "'fingerprint'", ",", "pass_features", "=", "True", ",", "permute", "=", "permute", ",", "n_perms", "=", "nperms", ",", "parallel", "=", "parallel", ")", ".", "values", ",", "0", ")", "if", "self", ".", "state", "is", "not", "None", ":", "# multiply states by n", "c", "=", "self", ".", "state", "*", "self", ".", "n", "# update state", "self", ".", "state", "=", "np", ".", "nansum", "(", "np", ".", "array", "(", "[", "c", ",", "next_weights", "]", ")", ",", "axis", "=", "0", ")", "/", "(", "self", ".", "n", "+", "1", ")", "else", ":", "self", ".", "state", "=", "next_weights", "# update the history", "self", ".", "history", ".", "append", "(", "next_weights", ")" ]
In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None
[ "In", "-", "place", "method", "that", "updates", "fingerprint", "with", "new", "data" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L83-L121
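The state update above is an incremental weighted average; a standalone sketch of the same arithmetic (note the rule as written weights the previous state by the already-incremented count n and divides by n + 1):

import numpy as np

state, n = None, 0
for next_weights in [np.array([0.6, 0.4]), np.array([0.8, 0.2])]:
    n += 1
    if state is not None:
        state = np.nansum(np.array([state * n, next_weights]), axis=0) / (n + 1)
    else:
        state = next_weights
print(state)  # [0.66666667 0.33333333] for these hypothetical inputs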
ContextLab/quail
quail/fingerprint.py
OptimalPresenter.order
def order(self, egg, method='permute', nperms=2500, strategy=None, distfun='correlation', fingerprint=None): """ Reorders a list of stimuli to match a fingerprint Parameters ---------- egg : quail.Egg Data to compute fingerprint method : str Method to re-sort list. Can be 'stick' or 'permute' (default: permute) nperms : int Number of permutations to use. Only used if method='permute'. (default: 2500) strategy : str or None The strategy to use to reorder the list. This can be 'stabilize', 'destabilize', 'random' or None. If None, the self.strategy field will be used. (default: None) distfun : str or function The distance function to reorder the list fingerprint to the target fingerprint. Can be any distance function supported by scipy.spatial.distance.cdist. For more info, see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html (default: euclidean) fingerprint : quail.Fingerprint or np.array Fingerprint (or just the state of a fingerprint) to reorder by. If None, the list will be reordered according to the fingerprint attached to the presenter object. Returns ---------- egg : quail.Egg Egg re-sorted to match fingerprint """ def order_perm(self, egg, dist_dict, strategy, nperm, distperm, fingerprint): """ This function re-sorts a list by computing permutations of a given list and choosing the one that maximizes/minimizes variance. """ # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # length of list pres_len = len(pres) weights = [] orders = [] for i in range(nperms): x = rand_perm(pres, features, dist_dict, dist_funcs) weights.append(x[0]) orders.append(x[1]) weights = np.array(weights) orders = np.array(orders) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])]) def order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint): # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(stick_perm)(self, egg, dist_dict, strategy) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) def order_best_choice(self, egg, dist_dict, nperms, distfun, fingerprint): # get strategy strategy = self.strategy # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(choice_perm)(self, egg, dist_dict) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = 
orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) # if strategy is not set explicitly, default to the class strategy if strategy is None: strategy = self.strategy dist_dict = compute_distances_dict(egg) if fingerprint is None: fingerprint = self.get_params('fingerprint').state elif isinstance(fingerprint, Fingerprint): fingerprint = fingerprint.state else: print('using custom fingerprint') if (strategy=='random') or (method=='random'): return shuffle_egg(egg) elif method=='permute': return order_perm(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='stick': return order_stick(self, egg, dist_dict, strategy, fingerprint) # elif method=='best_stick': return order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='best_choice': return order_best_choice(self, egg, dist_dict, nperms, fingerprint)
python
def order(self, egg, method='permute', nperms=2500, strategy=None, distfun='correlation', fingerprint=None): """ Reorders a list of stimuli to match a fingerprint Parameters ---------- egg : quail.Egg Data to compute fingerprint method : str Method to re-sort list. Can be 'stick' or 'permute' (default: permute) nperms : int Number of permutations to use. Only used if method='permute'. (default: 2500) strategy : str or None The strategy to use to reorder the list. This can be 'stabilize', 'destabilize', 'random' or None. If None, the self.strategy field will be used. (default: None) distfun : str or function The distance function to reorder the list fingerprint to the target fingerprint. Can be any distance function supported by scipy.spatial.distance.cdist. For more info, see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html (default: euclidean) fingerprint : quail.Fingerprint or np.array Fingerprint (or just the state of a fingerprint) to reorder by. If None, the list will be reordered according to the fingerprint attached to the presenter object. Returns ---------- egg : quail.Egg Egg re-sorted to match fingerprint """ def order_perm(self, egg, dist_dict, strategy, nperm, distperm, fingerprint): """ This function re-sorts a list by computing permutations of a given list and choosing the one that maximizes/minimizes variance. """ # parse egg pres, rec, features, dist_funcs = parse_egg(egg) # length of list pres_len = len(pres) weights = [] orders = [] for i in range(nperms): x = rand_perm(pres, features, dist_dict, dist_funcs) weights.append(x[0]) orders.append(x[1]) weights = np.array(weights) orders = np.array(orders) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distperm)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])]) def order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint): # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(stick_perm)(self, egg, dist_dict, strategy) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) def order_best_choice(self, egg, dist_dict, nperms, distfun, fingerprint): # get strategy strategy = self.strategy # parse egg pres, rec, features, dist_funcs = parse_egg(egg) results = Parallel(n_jobs=multiprocessing.cpu_count())( delayed(choice_perm)(self, egg, dist_dict) for i in range(nperms)) weights = np.array([x[0] for x in results]) orders = np.array([x[1] for x in results]) # find the closest (or farthest) if strategy=='stabilize': closest = orders[np.nanargmin(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() elif strategy=='destabilize': closest = 
orders[np.nanargmax(cdist(np.array(fingerprint, ndmin=2), weights, distfun)),:].astype(int).tolist() # return a re-sorted egg return Egg(pres=[list(pres[closest])], rec=[list(pres[closest])], features=[list(features[closest])], dist_funcs=dist_funcs) # if strategy is not set explicitly, default to the class strategy if strategy is None: strategy = self.strategy dist_dict = compute_distances_dict(egg) if fingerprint is None: fingerprint = self.get_params('fingerprint').state elif isinstance(fingerprint, Fingerprint): fingerprint = fingerprint.state else: print('using custom fingerprint') if (strategy=='random') or (method=='random'): return shuffle_egg(egg) elif method=='permute': return order_perm(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='stick': return order_stick(self, egg, dist_dict, strategy, fingerprint) # elif method=='best_stick': return order_best_stick(self, egg, dist_dict, strategy, nperms, distfun, fingerprint) # elif method=='best_choice': return order_best_choice(self, egg, dist_dict, nperms, fingerprint)
[ "def", "order", "(", "self", ",", "egg", ",", "method", "=", "'permute'", ",", "nperms", "=", "2500", ",", "strategy", "=", "None", ",", "distfun", "=", "'correlation'", ",", "fingerprint", "=", "None", ")", ":", "def", "order_perm", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperm", ",", "distperm", ",", "fingerprint", ")", ":", "\"\"\"\n This function re-sorts a list by computing permutations of a given\n list and choosing the one that maximizes/minimizes variance.\n \"\"\"", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "# length of list", "pres_len", "=", "len", "(", "pres", ")", "weights", "=", "[", "]", "orders", "=", "[", "]", "for", "i", "in", "range", "(", "nperms", ")", ":", "x", "=", "rand_perm", "(", "pres", ",", "features", ",", "dist_dict", ",", "dist_funcs", ")", "weights", ".", "append", "(", "x", "[", "0", "]", ")", "orders", ".", "append", "(", "x", "[", "1", "]", ")", "weights", "=", "np", ".", "array", "(", "weights", ")", "orders", "=", "np", ".", "array", "(", "orders", ")", "# find the closest (or farthest)", "if", "strategy", "==", "'stabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmin", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distperm", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "elif", "strategy", "==", "'destabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmax", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distperm", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# return a re-sorted egg", "return", "Egg", "(", "pres", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "rec", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "features", "=", "[", "list", "(", "features", "[", "closest", "]", ")", "]", ")", "def", "order_best_stick", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", ":", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "results", "=", "Parallel", "(", "n_jobs", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")", "(", "delayed", "(", "stick_perm", ")", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ")", "for", "i", "in", "range", "(", "nperms", ")", ")", "weights", "=", "np", ".", "array", "(", "[", "x", "[", "0", "]", "for", "x", "in", "results", "]", ")", "orders", "=", "np", ".", "array", "(", "[", "x", "[", "1", "]", "for", "x", "in", "results", "]", ")", "# find the closest (or farthest)", "if", "strategy", "==", "'stabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmin", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "elif", "strategy", "==", "'destabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmax", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# return a re-sorted egg", "return", "Egg", "(", "pres", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "rec", "=", "[", "list", "(", "pres", "[", 
"closest", "]", ")", "]", ",", "features", "=", "[", "list", "(", "features", "[", "closest", "]", ")", "]", ",", "dist_funcs", "=", "dist_funcs", ")", "def", "order_best_choice", "(", "self", ",", "egg", ",", "dist_dict", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", ":", "# get strategy", "strategy", "=", "self", ".", "strategy", "# parse egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "results", "=", "Parallel", "(", "n_jobs", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")", "(", "delayed", "(", "choice_perm", ")", "(", "self", ",", "egg", ",", "dist_dict", ")", "for", "i", "in", "range", "(", "nperms", ")", ")", "weights", "=", "np", ".", "array", "(", "[", "x", "[", "0", "]", "for", "x", "in", "results", "]", ")", "orders", "=", "np", ".", "array", "(", "[", "x", "[", "1", "]", "for", "x", "in", "results", "]", ")", "# find the closest (or farthest)", "if", "strategy", "==", "'stabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmin", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "elif", "strategy", "==", "'destabilize'", ":", "closest", "=", "orders", "[", "np", ".", "nanargmax", "(", "cdist", "(", "np", ".", "array", "(", "fingerprint", ",", "ndmin", "=", "2", ")", ",", "weights", ",", "distfun", ")", ")", ",", ":", "]", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "# return a re-sorted egg", "return", "Egg", "(", "pres", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "rec", "=", "[", "list", "(", "pres", "[", "closest", "]", ")", "]", ",", "features", "=", "[", "list", "(", "features", "[", "closest", "]", ")", "]", ",", "dist_funcs", "=", "dist_funcs", ")", "# if strategy is not set explicitly, default to the class strategy", "if", "strategy", "is", "None", ":", "strategy", "=", "self", ".", "strategy", "dist_dict", "=", "compute_distances_dict", "(", "egg", ")", "if", "fingerprint", "is", "None", ":", "fingerprint", "=", "self", ".", "get_params", "(", "'fingerprint'", ")", ".", "state", "elif", "isinstance", "(", "fingerprint", ",", "Fingerprint", ")", ":", "fingerprint", "=", "fingerprint", ".", "state", "else", ":", "print", "(", "'using custom fingerprint'", ")", "if", "(", "strategy", "==", "'random'", ")", "or", "(", "method", "==", "'random'", ")", ":", "return", "shuffle_egg", "(", "egg", ")", "elif", "method", "==", "'permute'", ":", "return", "order_perm", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", "#", "elif", "method", "==", "'stick'", ":", "return", "order_stick", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "fingerprint", ")", "#", "elif", "method", "==", "'best_stick'", ":", "return", "order_best_stick", "(", "self", ",", "egg", ",", "dist_dict", ",", "strategy", ",", "nperms", ",", "distfun", ",", "fingerprint", ")", "#", "elif", "method", "==", "'best_choice'", ":", "return", "order_best_choice", "(", "self", ",", "egg", ",", "dist_dict", ",", "nperms", ",", "fingerprint", ")" ]
Reorders a list of stimuli to match a fingerprint Parameters ---------- egg : quail.Egg Data to compute fingerprint method : str Method to re-sort list. Can be 'stick' or 'permute' (default: permute) nperms : int Number of permutations to use. Only used if method='permute'. (default: 2500) strategy : str or None The strategy to use to reorder the list. This can be 'stabilize', 'destabilize', 'random' or None. If None, the self.strategy field will be used. (default: None) distfun : str or function The distance function to reorder the list fingerprint to the target fingerprint. Can be any distance function supported by scipy.spatial.distance.cdist. For more info, see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html (default: euclidean) fingerprint : quail.Fingerprint or np.array Fingerprint (or just the state of a fingerprint) to reorder by. If None, the list will be reordered according to the fingerprint attached to the presenter object. Returns ---------- egg : quail.Egg Egg re-sorted to match fingerprint
[ "Reorders", "a", "list", "of", "stimuli", "to", "match", "a", "fingerprint" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L203-L344
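A self-contained sketch of the permute-and-select core used by order_perm above, with a stand-in scorer in place of the real clustering-score computation (the target fingerprint and per-order scores are hypothetical):

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
items = np.array(['cat', 'dog', 'fox', 'owl'])
target = np.array([[0.5, 0.3, 0.2]])     # hypothetical fingerprint state to match

orders, weights = [], []
for _ in range(100):
    orders.append(rng.permutation(len(items)))
    weights.append(rng.random(3))        # stand-in for that order's clustering scores
weights = np.array(weights)

# 'stabilize' keeps the candidate order whose scores are closest to the target
closest = orders[np.nanargmin(cdist(target, weights, 'correlation'))]
print(items[closest])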
aouyar/PyMunin
pysysinfo/phpopc.py
OPCinfo.initStats
def initStats(self, extras=None): """Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) #with open('/tmp/opcinfo.json') as f: # response = f.read() self._statusDict = json.loads(response)
python
def initStats(self, extras=None): """Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) #with open('/tmp/opcinfo.json') as f: # response = f.read() self._statusDict = json.loads(response)
[ "def", "initStats", "(", "self", ",", "extras", "=", "None", ")", ":", "url", "=", "\"%s://%s:%d/%s\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_monpath", ")", "response", "=", "util", ".", "get_url", "(", "url", ",", "self", ".", "_user", ",", "self", ".", "_password", ")", "#with open('/tmp/opcinfo.json') as f:", "# response = f.read()", "self", ".", "_statusDict", "=", "json", ".", "loads", "(", "response", ")" ]
Query and parse Web Server Status Page. @param extras: Include extra metrics, which can be computationally more expensive.
[ "Query", "and", "parse", "Web", "Server", "Status", "Page", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/phpopc.py#L72-L83
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._getDevMajorMinor
def _getDevMajorMinor(self, devpath): """Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor). """ fstat = os.stat(devpath) if stat.S_ISBLK(fstat.st_mode): return(os.major(fstat.st_rdev), os.minor(fstat.st_rdev)) else: raise ValueError("The file %s is not a valid block device." % devpath)
python
def _getDevMajorMinor(self, devpath): """Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor). """ fstat = os.stat(devpath) if stat.S_ISBLK(fstat.st_mode): return(os.major(fstat.st_rdev), os.minor(fstat.st_rdev)) else: raise ValueError("The file %s is not a valid block device." % devpath)
[ "def", "_getDevMajorMinor", "(", "self", ",", "devpath", ")", ":", "fstat", "=", "os", ".", "stat", "(", "devpath", ")", "if", "stat", ".", "S_ISBLK", "(", "fstat", ".", "st_mode", ")", ":", "return", "(", "os", ".", "major", "(", "fstat", ".", "st_rdev", ")", ",", "os", ".", "minor", "(", "fstat", ".", "st_rdev", ")", ")", "else", ":", "raise", "ValueError", "(", "\"The file %s is not a valid block device.\"", "%", "devpath", ")" ]
Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor).
[ "Return", "major", "and", "minor", "device", "number", "for", "block", "device", "path", "devpath", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L53-L63
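A standalone version of the same check, runnable on any Linux block device path:

import os
import stat

def dev_major_minor(devpath):
    fstat = os.stat(devpath)
    if not stat.S_ISBLK(fstat.st_mode):
        raise ValueError("%s is not a block device" % devpath)
    return os.major(fstat.st_rdev), os.minor(fstat.st_rdev)

# e.g. dev_major_minor('/dev/sda') -> (8, 0) on a typical Linux host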
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._getUniqueDev
def _getUniqueDev(self, devpath): """Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix. """ realpath = os.path.realpath(devpath) mobj = re.match('\/dev\/(.*)$', realpath) if mobj: dev = mobj.group(1) if dev in self._diskStats: return dev else: try: (major, minor) = self._getDevMajorMinor(realpath) except: return None return self._mapMajorMinor2dev.get((major, minor)) return None
python
def _getUniqueDev(self, devpath): """Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix. """ realpath = os.path.realpath(devpath) mobj = re.match('\/dev\/(.*)$', realpath) if mobj: dev = mobj.group(1) if dev in self._diskStats: return dev else: try: (major, minor) = self._getDevMajorMinor(realpath) except: return None return self._mapMajorMinor2dev.get((major, minor)) return None
[ "def", "_getUniqueDev", "(", "self", ",", "devpath", ")", ":", "realpath", "=", "os", ".", "path", ".", "realpath", "(", "devpath", ")", "mobj", "=", "re", ".", "match", "(", "'\\/dev\\/(.*)$'", ",", "realpath", ")", "if", "mobj", ":", "dev", "=", "mobj", ".", "group", "(", "1", ")", "if", "dev", "in", "self", ".", "_diskStats", ":", "return", "dev", "else", ":", "try", ":", "(", "major", ",", "minor", ")", "=", "self", ".", "_getDevMajorMinor", "(", "realpath", ")", "except", ":", "return", "None", "return", "self", ".", "_mapMajorMinor2dev", ".", "get", "(", "(", "major", ",", "minor", ")", ")", "return", "None" ]
Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix.
[ "Return", "unique", "device", "for", "any", "block", "device", "path", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L65-L84
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initBlockMajorMap
def _initBlockMajorMap(self): """Parses /proc/devices to initialize device class - major number map for block devices. """ self._mapMajorDevclass = {} try: fp = open(devicesFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading device information from file: %s' % devicesFile) skip = True for line in data.splitlines(): if skip: if re.match('block.*:', line, re.IGNORECASE): skip = False else: mobj = re.match('\s*(\d+)\s+([\w\-]+)$', line) if mobj: major = int(mobj.group(1)) devtype = mobj.group(2) self._mapMajorDevclass[major] = devtype if devtype == 'device-mapper': self._dmMajorNum = major
python
def _initBlockMajorMap(self): """Parses /proc/devices to initialize device class - major number map for block devices. """ self._mapMajorDevclass = {} try: fp = open(devicesFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading device information from file: %s' % devicesFile) skip = True for line in data.splitlines(): if skip: if re.match('block.*:', line, re.IGNORECASE): skip = False else: mobj = re.match('\s*(\d+)\s+([\w\-]+)$', line) if mobj: major = int(mobj.group(1)) devtype = mobj.group(2) self._mapMajorDevclass[major] = devtype if devtype == 'device-mapper': self._dmMajorNum = major
[ "def", "_initBlockMajorMap", "(", "self", ")", ":", "self", ".", "_mapMajorDevclass", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "devicesFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading device information from file: %s'", "%", "devicesFile", ")", "skip", "=", "True", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "if", "skip", ":", "if", "re", ".", "match", "(", "'block.*:'", ",", "line", ",", "re", ".", "IGNORECASE", ")", ":", "skip", "=", "False", "else", ":", "mobj", "=", "re", ".", "match", "(", "'\\s*(\\d+)\\s+([\\w\\-]+)$'", ",", "line", ")", "if", "mobj", ":", "major", "=", "int", "(", "mobj", ".", "group", "(", "1", ")", ")", "devtype", "=", "mobj", ".", "group", "(", "2", ")", "self", ".", "_mapMajorDevclass", "[", "major", "]", "=", "devtype", "if", "devtype", "==", "'device-mapper'", ":", "self", ".", "_dmMajorNum", "=", "major" ]
Parses /proc/devices to initialize device class - major number map for block devices.
[ "Parses", "/", "proc", "/", "devices", "to", "initialize", "device", "class", "-", "major", "number", "map", "for", "block", "devices", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L86-L111
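For reference, the 'Block devices:' section of /proc/devices that this parser walks looks roughly like the sample below (contents vary per host), and each entry line matches the regex used above:

import re

sample = """Character devices:
  1 mem
Block devices:
  8 sd
253 device-mapper
"""
skip = True
for line in sample.splitlines():
    if skip:
        if re.match('block.*:', line, re.IGNORECASE):
            skip = False
    else:
        mobj = re.match(r'\s*(\d+)\s+([\w\-]+)$', line)
        if mobj:
            print(int(mobj.group(1)), mobj.group(2))  # 8 sd / 253 device-mapper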
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initDMinfo
def _initDMinfo(self): """Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs. """ self._mapLVtuple2dm = {} self._mapLVname2dm = {} self._vgTree = {} if self._dmMajorNum is None: self._initBlockMajorMap() for file in os.listdir(devmapperDir): mobj = re.match('([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$', file) if mobj: path = os.path.join(devmapperDir, file) (major, minor) = self._getDevMajorMinor(path) if major == self._dmMajorNum: vg = mobj.group(1).replace('--', '-') lv = mobj.group(2).replace('--', '-') dmdev = "dm-%d" % minor self._mapLVtuple2dm[(vg,lv)] = dmdev self._mapLVname2dm[file] = dmdev if not vg in self._vgTree: self._vgTree[vg] = [] self._vgTree[vg].append(lv)
python
def _initDMinfo(self): """Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs. """ self._mapLVtuple2dm = {} self._mapLVname2dm = {} self._vgTree = {} if self._dmMajorNum is None: self._initBlockMajorMap() for file in os.listdir(devmapperDir): mobj = re.match('([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$', file) if mobj: path = os.path.join(devmapperDir, file) (major, minor) = self._getDevMajorMinor(path) if major == self._dmMajorNum: vg = mobj.group(1).replace('--', '-') lv = mobj.group(2).replace('--', '-') dmdev = "dm-%d" % minor self._mapLVtuple2dm[(vg,lv)] = dmdev self._mapLVname2dm[file] = dmdev if not vg in self._vgTree: self._vgTree[vg] = [] self._vgTree[vg].append(lv)
[ "def", "_initDMinfo", "(", "self", ")", ":", "self", ".", "_mapLVtuple2dm", "=", "{", "}", "self", ".", "_mapLVname2dm", "=", "{", "}", "self", ".", "_vgTree", "=", "{", "}", "if", "self", ".", "_dmMajorNum", "is", "None", ":", "self", ".", "_initBlockMajorMap", "(", ")", "for", "file", "in", "os", ".", "listdir", "(", "devmapperDir", ")", ":", "mobj", "=", "re", ".", "match", "(", "'([a-zA-Z0-9+_.\\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\\-]*)$'", ",", "file", ")", "if", "mobj", ":", "path", "=", "os", ".", "path", ".", "join", "(", "devmapperDir", ",", "file", ")", "(", "major", ",", "minor", ")", "=", "self", ".", "_getDevMajorMinor", "(", "path", ")", "if", "major", "==", "self", ".", "_dmMajorNum", ":", "vg", "=", "mobj", ".", "group", "(", "1", ")", ".", "replace", "(", "'--'", ",", "'-'", ")", "lv", "=", "mobj", ".", "group", "(", "2", ")", ".", "replace", "(", "'--'", ",", "'-'", ")", "dmdev", "=", "\"dm-%d\"", "%", "minor", "self", ".", "_mapLVtuple2dm", "[", "(", "vg", ",", "lv", ")", "]", "=", "dmdev", "self", ".", "_mapLVname2dm", "[", "file", "]", "=", "dmdev", "if", "not", "vg", "in", "self", ".", "_vgTree", ":", "self", ".", "_vgTree", "[", "vg", "]", "=", "[", "]", "self", ".", "_vgTree", "[", "vg", "]", ".", "append", "(", "lv", ")" ]
Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs.
[ "Check", "files", "in", "/", "dev", "/", "mapper", "to", "initialize", "data", "structures", "for", "mappings", "between", "device", "-", "mapper", "devices", "minor", "device", "numbers", "VGs", "and", "LVs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L113-L137
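The /dev/mapper file names encode the (VG, LV) pair with '-' as separator and '--' as an escaped literal dash; a minimal sketch of the same decoding (hypothetical name):

import re

name = 'vg--data-lv--root'   # /dev/mapper entry for VG 'vg-data', LV 'lv-root'
mobj = re.match(r'([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$',
                name)
vg = mobj.group(1).replace('--', '-')
lv = mobj.group(2).replace('--', '-')
print(vg, lv)  # vg-data lv-root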
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initFilesystemInfo
def _initFilesystemInfo(self): """Initialize filesystem to device mappings.""" self._mapFSpathDev = {} fsinfo = FilesystemInfo() for fs in fsinfo.getFSlist(): devpath = fsinfo.getFSdev(fs) dev = self._getUniqueDev(devpath) if dev is not None: self._mapFSpathDev[fs] = dev
python
def _initFilesystemInfo(self): """Initialize filesystem to device mappings.""" self._mapFSpathDev = {} fsinfo = FilesystemInfo() for fs in fsinfo.getFSlist(): devpath = fsinfo.getFSdev(fs) dev = self._getUniqueDev(devpath) if dev is not None: self._mapFSpathDev[fs] = dev
[ "def", "_initFilesystemInfo", "(", "self", ")", ":", "self", ".", "_mapFSpathDev", "=", "{", "}", "fsinfo", "=", "FilesystemInfo", "(", ")", "for", "fs", "in", "fsinfo", ".", "getFSlist", "(", ")", ":", "devpath", "=", "fsinfo", ".", "getFSdev", "(", "fs", ")", "dev", "=", "self", ".", "_getUniqueDev", "(", "devpath", ")", "if", "dev", "is", "not", "None", ":", "self", ".", "_mapFSpathDev", "[", "fs", "]", "=", "dev" ]
Initialize filesystem to device mappings.
[ "Initialize", "filesystem", "to", "device", "mappings", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L139-L147
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initSwapInfo
def _initSwapInfo(self): """Initialize swap partition to device mappings.""" self._swapList = [] sysinfo = SystemInfo() for (swap,attrs) in sysinfo.getSwapStats().iteritems(): if attrs['type'] == 'partition': dev = self._getUniqueDev(swap) if dev is not None: self._swapList.append(dev)
python
def _initSwapInfo(self): """Initialize swap partition to device mappings.""" self._swapList = [] sysinfo = SystemInfo() for (swap,attrs) in sysinfo.getSwapStats().iteritems(): if attrs['type'] == 'partition': dev = self._getUniqueDev(swap) if dev is not None: self._swapList.append(dev)
[ "def", "_initSwapInfo", "(", "self", ")", ":", "self", ".", "_swapList", "=", "[", "]", "sysinfo", "=", "SystemInfo", "(", ")", "for", "(", "swap", ",", "attrs", ")", "in", "sysinfo", ".", "getSwapStats", "(", ")", ".", "iteritems", "(", ")", ":", "if", "attrs", "[", "'type'", "]", "==", "'partition'", ":", "dev", "=", "self", ".", "_getUniqueDev", "(", "swap", ")", "if", "dev", "is", "not", "None", ":", "self", ".", "_swapList", ".", "append", "(", "dev", ")" ]
Initialize swap partition to device mappings.
[ "Initialize", "swap", "partition", "to", "device", "mappings", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L149-L157
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initDiskStats
def _initDiskStats(self): """Parse and initialize block device I/O stats in /proc/diskstats.""" self._diskStats = {} self._mapMajorMinor2dev = {} try: fp = open(diskStatsFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading disk stats from file: %s' % diskStatsFile) for line in data.splitlines(): cols = line.split() dev = cols.pop(2) if len(cols) == 13: self._diskStats[dev] = dict(zip( ('major', 'minor', 'rios', 'rmerges', 'rsect', 'rticks', 'wios', 'wmerges', 'wsect', 'wticks', 'ios_active', 'totticks', 'rqticks'), [int(x) for x in cols])) elif len(cols) == 6: self._diskStats[dev] = dict(zip( ('major', 'minor', 'rios', 'rsect', 'wios', 'wsect'), [int(x) for x in cols])) else: continue self._diskStats[dev]['rbytes'] = ( self._diskStats[dev]['rsect'] * sectorSize) self._diskStats[dev]['wbytes'] = ( self._diskStats[dev]['wsect'] * sectorSize) self._mapMajorMinor2dev[(int(cols[0]), int(cols[1]))] = dev
python
def _initDiskStats(self): """Parse and initialize block device I/O stats in /proc/diskstats.""" self._diskStats = {} self._mapMajorMinor2dev = {} try: fp = open(diskStatsFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading disk stats from file: %s' % diskStatsFile) for line in data.splitlines(): cols = line.split() dev = cols.pop(2) if len(cols) == 13: self._diskStats[dev] = dict(zip( ('major', 'minor', 'rios', 'rmerges', 'rsect', 'rticks', 'wios', 'wmerges', 'wsect', 'wticks', 'ios_active', 'totticks', 'rqticks'), [int(x) for x in cols])) elif len(cols) == 6: self._diskStats[dev] = dict(zip( ('major', 'minor', 'rios', 'rsect', 'wios', 'wsect'), [int(x) for x in cols])) else: continue self._diskStats[dev]['rbytes'] = ( self._diskStats[dev]['rsect'] * sectorSize) self._diskStats[dev]['wbytes'] = ( self._diskStats[dev]['wsect'] * sectorSize) self._mapMajorMinor2dev[(int(cols[0]), int(cols[1]))] = dev
[ "def", "_initDiskStats", "(", "self", ")", ":", "self", ".", "_diskStats", "=", "{", "}", "self", ".", "_mapMajorMinor2dev", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "diskStatsFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading disk stats from file: %s'", "%", "diskStatsFile", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "cols", "=", "line", ".", "split", "(", ")", "dev", "=", "cols", ".", "pop", "(", "2", ")", "if", "len", "(", "cols", ")", "==", "13", ":", "self", ".", "_diskStats", "[", "dev", "]", "=", "dict", "(", "zip", "(", "(", "'major'", ",", "'minor'", ",", "'rios'", ",", "'rmerges'", ",", "'rsect'", ",", "'rticks'", ",", "'wios'", ",", "'wmerges'", ",", "'wsect'", ",", "'wticks'", ",", "'ios_active'", ",", "'totticks'", ",", "'rqticks'", ")", ",", "[", "int", "(", "x", ")", "for", "x", "in", "cols", "]", ")", ")", "elif", "len", "(", "cols", ")", "==", "6", ":", "self", ".", "_diskStats", "[", "dev", "]", "=", "dict", "(", "zip", "(", "(", "'major'", ",", "'minor'", ",", "'rios'", ",", "'rsect'", ",", "'wios'", ",", "'wsect'", ")", ",", "[", "int", "(", "x", ")", "for", "x", "in", "cols", "]", ")", ")", "else", ":", "continue", "self", ".", "_diskStats", "[", "dev", "]", "[", "'rbytes'", "]", "=", "(", "self", ".", "_diskStats", "[", "dev", "]", "[", "'rsect'", "]", "*", "sectorSize", ")", "self", ".", "_diskStats", "[", "dev", "]", "[", "'wbytes'", "]", "=", "(", "self", ".", "_diskStats", "[", "dev", "]", "[", "'wsect'", "]", "*", "sectorSize", ")", "self", ".", "_mapMajorMinor2dev", "[", "(", "int", "(", "cols", "[", "0", "]", ")", ",", "int", "(", "cols", "[", "1", "]", ")", ")", "]", "=", "dev" ]
Parse and initialize block device I/O stats in /proc/diskstats.
[ "Parse", "and", "initialize", "block", "device", "I", "/", "O", "stats", "in", "/", "proc", "/", "diskstats", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L159-L192
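A sample /proc/diskstats line (hypothetical counter values) and the 13-field mapping used above; rsect/wsect are sector counts, converted to bytes via sectorSize (assumed here to be the Linux convention of 512):

line = '   8       0 sda 4520 1103 205272 3410 2021 980 145800 5120 0 6200 8530'
cols = line.split()
dev = cols.pop(2)    # 'sda'
fields = ('major', 'minor', 'rios', 'rmerges', 'rsect', 'rticks',
          'wios', 'wmerges', 'wsect', 'wticks', 'ios_active',
          'totticks', 'rqticks')
stats = dict(zip(fields, [int(x) for x in cols]))
stats['rbytes'] = stats['rsect'] * 512   # sectorSize assumed to be 512 here
stats['wbytes'] = stats['wsect'] * 512
print(dev, stats['rbytes'], stats['wbytes'])  # sda 105099264 74649600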
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo._initDevClasses
def _initDevClasses(self): """Sort block devices into lists depending on device class and initialize device type map and partition map.""" self._devClassTree = {} self._partitionTree = {} self._mapDevType = {} basedevs = [] otherdevs = [] if self._mapMajorDevclass is None: self._initBlockMajorMap() for dev in self._diskStats: stats = self._diskStats[dev] devclass = self._mapMajorDevclass.get(stats['major']) if devclass is not None: devdir = os.path.join(sysfsBlockdevDir, dev) if os.path.isdir(devdir): if not self._devClassTree.has_key(devclass): self._devClassTree[devclass] = [] self._devClassTree[devclass].append(dev) self._mapDevType[dev] = devclass basedevs.append(dev) else: otherdevs.append(dev) basedevs.sort(key=len, reverse=True) otherdevs.sort(key=len, reverse=True) idx = 0 for partdev in otherdevs: while len(basedevs[idx]) > len(partdev): idx += 1 for dev in basedevs[idx:]: if re.match("%s(\d+|p\d+)$" % dev, partdev): if not self._partitionTree.has_key(dev): self._partitionTree[dev] = [] self._partitionTree[dev].append(partdev) self._mapDevType[partdev] = 'part'
python
def _initDevClasses(self): """Sort block devices into lists depending on device class and initialize device type map and partition map.""" self._devClassTree = {} self._partitionTree = {} self._mapDevType = {} basedevs = [] otherdevs = [] if self._mapMajorDevclass is None: self._initBlockMajorMap() for dev in self._diskStats: stats = self._diskStats[dev] devclass = self._mapMajorDevclass.get(stats['major']) if devclass is not None: devdir = os.path.join(sysfsBlockdevDir, dev) if os.path.isdir(devdir): if not self._devClassTree.has_key(devclass): self._devClassTree[devclass] = [] self._devClassTree[devclass].append(dev) self._mapDevType[dev] = devclass basedevs.append(dev) else: otherdevs.append(dev) basedevs.sort(key=len, reverse=True) otherdevs.sort(key=len, reverse=True) idx = 0 for partdev in otherdevs: while len(basedevs[idx]) > len(partdev): idx += 1 for dev in basedevs[idx:]: if re.match("%s(\d+|p\d+)$" % dev, partdev): if not self._partitionTree.has_key(dev): self._partitionTree[dev] = [] self._partitionTree[dev].append(partdev) self._mapDevType[partdev] = 'part'
[ "def", "_initDevClasses", "(", "self", ")", ":", "self", ".", "_devClassTree", "=", "{", "}", "self", ".", "_partitionTree", "=", "{", "}", "self", ".", "_mapDevType", "=", "{", "}", "basedevs", "=", "[", "]", "otherdevs", "=", "[", "]", "if", "self", ".", "_mapMajorDevclass", "is", "None", ":", "self", ".", "_initBlockMajorMap", "(", ")", "for", "dev", "in", "self", ".", "_diskStats", ":", "stats", "=", "self", ".", "_diskStats", "[", "dev", "]", "devclass", "=", "self", ".", "_mapMajorDevclass", ".", "get", "(", "stats", "[", "'major'", "]", ")", "if", "devclass", "is", "not", "None", ":", "devdir", "=", "os", ".", "path", ".", "join", "(", "sysfsBlockdevDir", ",", "dev", ")", "if", "os", ".", "path", ".", "isdir", "(", "devdir", ")", ":", "if", "not", "self", ".", "_devClassTree", ".", "has_key", "(", "devclass", ")", ":", "self", ".", "_devClassTree", "[", "devclass", "]", "=", "[", "]", "self", ".", "_devClassTree", "[", "devclass", "]", ".", "append", "(", "dev", ")", "self", ".", "_mapDevType", "[", "dev", "]", "=", "devclass", "basedevs", ".", "append", "(", "dev", ")", "else", ":", "otherdevs", ".", "append", "(", "dev", ")", "basedevs", ".", "sort", "(", "key", "=", "len", ",", "reverse", "=", "True", ")", "otherdevs", ".", "sort", "(", "key", "=", "len", ",", "reverse", "=", "True", ")", "idx", "=", "0", "for", "partdev", "in", "otherdevs", ":", "while", "len", "(", "basedevs", "[", "idx", "]", ")", ">", "partdev", ":", "idx", "+=", "1", "for", "dev", "in", "basedevs", "[", "idx", ":", "]", ":", "if", "re", ".", "match", "(", "\"%s(\\d+|p\\d+)$\"", "%", "dev", ",", "partdev", ")", ":", "if", "not", "self", ".", "_partitionTree", ".", "has_key", "(", "dev", ")", ":", "self", ".", "_partitionTree", "[", "dev", "]", "=", "[", "]", "self", ".", "_partitionTree", "[", "dev", "]", ".", "append", "(", "partdev", ")", "self", ".", "_mapDevType", "[", "partdev", "]", "=", "'part'" ]
Sort block devices into lists depending on device class and initialize device type map and partition map.
[ "Sort", "block", "devices", "into", "lists", "depending", "on", "device", "class", "and", "initialize", "device", "type", "map", "and", "partition", "map", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L194-L228
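The partition-to-disk matching treats a name as a partition of a base device when it is the base name plus digits (or 'p' plus digits, as with mmcblk/nvme-style names); a minimal check using the same pattern:

import re

def is_partition_of(base, part):
    # re.escape added here for safety; the original interpolates the name directly
    return re.match(r"%s(\d+|p\d+)$" % re.escape(base), part) is not None

print(is_partition_of('sda', 'sda1'))           # True
print(is_partition_of('mmcblk0', 'mmcblk0p2'))  # True
print(is_partition_of('sda', 'sdb1'))           # False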
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo.getDevType
def getDevType(self, dev): """Returns type of device dev. @return: Device type as string. """ if self._devClassTree is None: self._initDevClasses() return self._mapDevType.get(dev)
python
def getDevType(self, dev): """Returns type of device dev. @return: Device type as string. """ if self._devClassTree is None: self._initDevClasses() return self._mapDevType.get(dev)
[ "def", "getDevType", "(", "self", ",", "dev", ")", ":", "if", "self", ".", "_devClassTree", "is", "None", ":", "self", ".", "_initDevClasses", "(", ")", "return", "self", ".", "_mapDevType", ".", "get", "(", "dev", ")" ]
Returns type of device dev. @return: Device type as string.
[ "Returns", "type", "of", "device", "dev", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L230-L238
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo.getPartitionList
def getPartitionList(self): """Returns list of partitions. @return: List of (disk,partition) pairs. """ if self._partList is None: self._partList = [] for (disk,parts) in self.getPartitionDict().iteritems(): for part in parts: self._partList.append((disk,part)) return self._partList
python
def getPartitionList(self): """Returns list of partitions. @return: List of (disk,partition) pairs. """ if self._partList is None: self._partList = [] for (disk,parts) in self.getPartitionDict().iteritems(): for part in parts: self._partList.append((disk,part)) return self._partList
[ "def", "getPartitionList", "(", "self", ")", ":", "if", "self", ".", "_partList", "is", "None", ":", "self", ".", "_partList", "=", "[", "]", "for", "(", "disk", ",", "parts", ")", "in", "self", ".", "getPartitionDict", "(", ")", ".", "iteritems", "(", ")", ":", "for", "part", "in", "parts", ":", "self", ".", "_partList", ".", "append", "(", "(", "disk", ",", "part", ")", ")", "return", "self", ".", "_partList" ]
Returns list of partitions. @return: List of (disk,partition) pairs.
[ "Returns", "list", "of", "partitions", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L288-L299
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo.getDevStats
def getDevStats(self, dev, devtype = None): """Returns I/O stats for block device. @param dev: Device name @param devtype: Device type. (Ignored if None.) @return: Dict of stats. """ if devtype is not None: if self._devClassTree is None: self._initDevClasses() if devtype <> self._mapDevType.get(dev): return None return self._diskStats.get(dev)
python
def getDevStats(self, dev, devtype = None): """Returns I/O stats for block device. @param dev: Device name @param devtype: Device type. (Ignored if None.) @return: Dict of stats. """ if devtype is not None: if self._devClassTree is None: self._initDevClasses() if devtype <> self._mapDevType.get(dev): return None return self._diskStats.get(dev)
[ "def", "getDevStats", "(", "self", ",", "dev", ",", "devtype", "=", "None", ")", ":", "if", "devtype", "is", "not", "None", ":", "if", "self", ".", "_devClassTree", "is", "None", ":", "self", ".", "_initDevClasses", "(", ")", "if", "devtype", "<>", "self", ".", "_mapDevType", ".", "get", "(", "dev", ")", ":", "return", "None", "return", "self", ".", "_diskStats", ".", "get", "(", "dev", ")" ]
Returns I/O stats for block device. @param dev: Device name @param devtype: Device type. (Ignored if None.) @return: Dict of stats.
[ "Returns", "I", "/", "O", "stats", "for", "block", "device", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L367-L380
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo.getSwapStats
def getSwapStats(self, dev): """Returns I/O stats for swap partition. @param dev: Device name for swap partition. @return: Dict of stats. """ if self._swapList is None: self._initSwapInfo() if dev in self._swapList: return self.getDevStats(dev) else: return None
python
def getSwapStats(self, dev): """Returns I/O stats for swap partition. @param dev: Device name for swap partition. @return: Dict of stats. """ if self._swapList is None: self._initSwapInfo() if dev in self._swapList: return self.getDevStats(dev) else: return None
[ "def", "getSwapStats", "(", "self", ",", "dev", ")", ":", "if", "self", ".", "_swapList", "is", "None", ":", "self", ".", "_initSwapInfo", "(", ")", "if", "dev", "in", "self", ".", "_swapList", ":", "return", "self", ".", "getDevStats", "(", "dev", ")", "else", ":", "return", "None" ]
Returns I/O stats for swap partition. @param dev: Device name for swap partition. @return: Dict of stats.
[ "Returns", "I", "/", "O", "stats", "for", "swap", "partition", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L418-L430
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo.getLVstats
def getLVstats(self, *args): """Returns I/O stats for LV. @param args: Two calling conventions are implemented: - Passing two parameters vg and lv. - Passing only one parameter in 'vg-lv' format. @return: Dict of stats. """ if not len(args) in (1, 2): raise TypeError("The getLVstats must be called with either " "one or two arguments.") if self._vgTree is None: self._initDMinfo() if len(args) == 1: dmdev = self._mapLVname2dm.get(args[0]) else: dmdev = self._mapLVtuple2dm.get(args) if dmdev is not None: return self.getDevStats(dmdev) else: return None
python
def getLVstats(self, *args): """Returns I/O stats for LV. @param args: Two calling conventions are implemented: - Passing two parameters vg and lv. - Passing only one parameter in 'vg-lv' format. @return: Dict of stats. """ if not len(args) in (1, 2): raise TypeError("The getLVstats must be called with either " "one or two arguments.") if self._vgTree is None: self._initDMinfo() if len(args) == 1: dmdev = self._mapLVname2dm.get(args[0]) else: dmdev = self._mapLVtuple2dm.get(args) if dmdev is not None: return self.getDevStats(dmdev) else: return None
[ "def", "getLVstats", "(", "self", ",", "*", "args", ")", ":", "if", "not", "len", "(", "args", ")", "in", "(", "1", ",", "2", ")", ":", "raise", "TypeError", "(", "\"The getLVstats must be called with either \"", "\"one or two arguments.\"", ")", "if", "self", ".", "_vgTree", "is", "None", ":", "self", ".", "_initDMinfo", "(", ")", "if", "len", "(", "args", ")", "==", "1", ":", "dmdev", "=", "self", ".", "_mapLVname2dm", ".", "get", "(", "args", "[", "0", "]", ")", "else", ":", "dmdev", "=", "self", ".", "_mapLVtuple2dm", ".", "get", "(", "args", ")", "if", "dmdev", "is", "not", "None", ":", "return", "self", ".", "getDevStats", "(", "dmdev", ")", "else", ":", "return", "None" ]
Returns I/O stats for LV. @param args: Two calling conventions are implemented: - Passing two parameters vg and lv. - Passing only one parameter in 'vg-lv' format. @return: Dict of stats.
[ "Returns", "I", "/", "O", "stats", "for", "LV", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L432-L453
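A hedged usage sketch of the two calling conventions, assuming a Linux host with a VG named 'vg00' holding an LV named 'root':

from pysysinfo.diskio import DiskIOinfo  # import path per this repository's layout

io = DiskIOinfo()
stats_a = io.getLVstats('vg00', 'root')  # two-argument form: (vg, lv)
stats_b = io.getLVstats('vg00-root')     # one-argument form: 'vg-lv'
# both return the same dict of counters (rios, wios, rbytes, wbytes, ...) or None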
aouyar/PyMunin
pysysinfo/diskio.py
DiskIOinfo.getFilesystemStats
def getFilesystemStats(self, fs): """Returns I/O stats for filesystem. @param fs: Filesystem path. @return: Dict of stats. """ if self._mapFSpathDev is None: self._initFilesystemInfo() return self._diskStats.get(self._mapFSpathDev.get(fs))
python
def getFilesystemStats(self, fs): """Returns I/O stats for filesystem. @param fs: Filesystem path. @return: Dict of stats. """ if self._mapFSpathDev is None: self._initFilesystemInfo() return self._diskStats.get(self._mapFSpathDev.get(fs))
[ "def", "getFilesystemStats", "(", "self", ",", "fs", ")", ":", "if", "self", ".", "_mapFSpathDev", "is", "None", ":", "self", ".", "_initFilesystemInfo", "(", ")", "return", "self", ".", "_diskStats", ".", "get", "(", "self", ".", "_mapFSpathDev", ".", "get", "(", "fs", ")", ")" ]
Returns I/O stats for filesystem. @param fs: Filesystem path. @return: Dict of stats.
[ "Returns", "I", "/", "O", "stats", "for", "filesystem", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L455-L464
aouyar/PyMunin
pymunin/plugins/ntphostoffset_.py
MuninNTPhostOffsetPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" ntpinfo = NTPinfo() stats = ntpinfo.getHostOffset(self._remoteHost) if stats: graph_name = 'ntp_host_stratum_%s' % self._remoteHost if self.hasGraph(graph_name): self.setGraphVal(graph_name, 'stratum', stats.get('stratum')) graph_name = 'ntp_host_stat_%s' % self._remoteHost if self.hasGraph(graph_name): self.setGraphVal(graph_name, 'offset', stats.get('offset')) self.setGraphVal(graph_name, 'delay', stats.get('delay'))
python
def retrieveVals(self): """Retrieve values for graphs.""" ntpinfo = NTPinfo() stats = ntpinfo.getHostOffset(self._remoteHost) if stats: graph_name = 'ntp_host_stratum_%s' % self._remoteHost if self.hasGraph(graph_name): self.setGraphVal(graph_name, 'stratum', stats.get('stratum')) graph_name = 'ntp_host_stat_%s' % self._remoteHost if self.hasGraph(graph_name): self.setGraphVal(graph_name, 'offset', stats.get('offset')) self.setGraphVal(graph_name, 'delay', stats.get('delay'))
[ "def", "retrieveVals", "(", "self", ")", ":", "ntpinfo", "=", "NTPinfo", "(", ")", "stats", "=", "ntpinfo", ".", "getHostOffset", "(", "self", ".", "_remoteHost", ")", "if", "stats", ":", "graph_name", "=", "'ntp_host_stratum_%s'", "%", "self", ".", "_remoteHost", "if", "self", ".", "hasGraph", "(", "graph_name", ")", ":", "self", ".", "setGraphVal", "(", "graph_name", ",", "'stratum'", ",", "stats", ".", "get", "(", "'stratum'", ")", ")", "graph_name", "=", "'ntp_host_stat_%s'", "%", "self", ".", "_remoteHost", "if", "self", ".", "hasGraph", "(", "graph_name", ")", ":", "self", ".", "setGraphVal", "(", "graph_name", ",", "'offset'", ",", "stats", ".", "get", "(", "'offset'", ")", ")", "self", ".", "setGraphVal", "(", "graph_name", ",", "'delay'", ",", "stats", ".", "get", "(", "'delay'", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/ntphostoffset_.py#L105-L116
aouyar/PyMunin
pysysinfo/rackspace.py
CloudFilesInfo.getContainerStats
def getContainerStats(self, limit=None, marker=None): """Returns Rackspace Cloud Files usage stats for containers. @param limit: Number of containers to return. @param marker: Return only results whose name is greater than marker. @return: Dictionary of container stats indexed by container name. """ stats = {} for row in self._conn.list_containers_info(limit, marker): stats[row['name']] = {'count': row['count'], 'size': row['bytes']} return stats
python
def getContainerStats(self, limit=None, marker=None): """Returns Rackspace Cloud Files usage stats for containers. @param limit: Number of containers to return. @param marker: Return only results whose name is greater than marker. @return: Dictionary of container stats indexed by container name. """ stats = {} for row in self._conn.list_containers_info(limit, marker): stats[row['name']] = {'count': row['count'], 'size': row['bytes']} return stats
[ "def", "getContainerStats", "(", "self", ",", "limit", "=", "None", ",", "marker", "=", "None", ")", ":", "stats", "=", "{", "}", "for", "row", "in", "self", ".", "_conn", ".", "list_containers_info", "(", "limit", ",", "marker", ")", ":", "stats", "[", "row", "[", "'name'", "]", "]", "=", "{", "'count'", ":", "row", "[", "'count'", "]", ",", "'size'", ":", "row", "[", "'bytes'", "]", "}", "return", "stats" ]
Returns Rackspace Cloud Files usage stats for containers. @param limit: Number of containers to return. @param marker: Return only results whose name is greater than marker. @return: Dictionary of container stats indexed by container name.
[ "Returns", "Rackspace", "Cloud", "Files", "usage", "stats", "for", "containers", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/rackspace.py#L58-L69
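A sketch of paging through all containers with limit/marker; the CloudFilesInfo constructor arguments are assumptions (they are not shown in this record) and the credentials are placeholders:

from pysysinfo.rackspace import CloudFilesInfo

cf = CloudFilesInfo(username='myuser', apikey='mykey')  # hypothetical constructor signature
page = cf.getContainerStats(limit=100)
while page:
    for name, info in sorted(page.items()):
        print(name, info['count'], info['size'])
    # Per the docstring, the next page holds names greater than the marker.
    page = cf.getContainerStats(limit=100, marker=max(page))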
ContextLab/quail
quail/decode_speech.py
decode_speech
def decode_speech(path, keypath=None, save=False, speech_context=None, sample_rate=44100, max_alternatives=1, language_code='en-US', enable_word_time_offsets=True, return_raw=False): """ Decode speech for a file or folder and return results This function wraps the Google Speech API and ffmpeg to decode speech for free recall experiments. Note: in order for this to work, you must have a Google Speech account, a google speech credentials file referenced in your _bash_profile, and ffmpeg installed on your computer. See our readthedocs for more information on how to set this up: http://cdl-quail.readthedocs.io/en/latest/. Parameters ---------- path : str Path to a wav file, or a folder of wav files. keypath : str Google Cloud Speech API key filepath. This is a JSON file containing credentials that was generated when creating a service account key. If None, assumes you have a local key that is set with an environment variable. See the speech decoding tutorial for details. save : boolean False by default, but if set to true, will save a pickle with the results object from google speech, and a text file with the decoded words. speech_context : list of str This allows you to give some context to the speech decoding algorithm. For example, this could be the words studied on a given list, or all words in an experiment. sample_rate : float The sample rate of your audio files (default is 44100). max_alternatives : int You can specify the speech decoding to return multiple guesses to the decoding. This will be saved in the results object (default is 1). language_code : str Decoding language code. Default is en-US. See here for more details: https://cloud.google.com/speech/docs/languages enable_word_time_offsets : bool Returns timing information (onsets/offsets) for each word (default is True). return_raw : boolean Instead of returning the parsed results objects (i.e. the words), you can return the raw response object. This has more details about the decoding, such as confidence. Returns ---------- words : list of str, or list of lists of str The results of the speech decoding. This will be a list if only one file is input, or a list of lists if more than one file is decoded. raw : google speech object, or list of objects You can optionally return the google speech object instead of the parsed results by using the return_raw flag.
""" # SUBFUNCTIONS def decode_file(file_path, client, speech_context, sample_rate, max_alternatives, enable_word_time_offsets): def recognize(chunk, file_path): """ Subfunction that loops over audio segments to recognize speech """ # export as flac chunk.export(file_path + ".flac", format = "flac", bitrate="44.1k") # open flac file with open(file_path + ".flac", 'rb') as sc: speech_content = sc.read() # initialize speech sample sample = types.RecognitionAudio(content=speech_content) # run speech decoding try: result = client.recognize(opts, sample) except ValueError as e: print(e) result = None return result opts = {} opts['encoding']=enums.RecognitionConfig.AudioEncoding.FLAC opts['language_code'] = language_code opts['sample_rate_hertz'] = sample_rate opts['max_alternatives'] = max_alternatives opts['enable_word_time_offsets'] = enable_word_time_offsets if speech_context: opts['speech_contexts']=[types.SpeechContext(phrases=speech_context)] # read in wav audio = AudioSegment.from_wav(file_path) # segment into 1 minute chunks if len(audio)>60000: segments = list(range(0,len(audio),60000)) if segments[-1]<len(audio): segments.append(len(audio)-1) print('Audio clip is longer than 1 minute. Splitting into %d one minute segments...' % (len(segments)-1)) audio_chunks = [] for i in range(len(segments)-1): audio_chunks.append(audio[segments[i]:segments[i+1]]) else: audio_chunks = [audio] # loop over audio segments results = [] for idx, chunk in enumerate(audio_chunks): results.append(recognize(chunk, file_path+str(idx))) # return list of results return results def parse_response(results): """Parses response from google speech""" words = [] for chunk in results: for result in chunk.results: alternative = result.alternatives[0] print('Transcript: {}'.format(alternative.transcript)) print('Confidence: {}'.format(alternative.confidence)) for word_info in alternative.words: word = word_info.word start_time = word_info.start_time end_time = word_info.end_time print('Word: {}, start_time: {}, end_time: {}'.format( word, start_time.seconds + start_time.nanos * 1e-9, end_time.seconds + end_time.nanos * 1e-9)) words.append((str(word).upper(), start_time.seconds + start_time.nanos * 1e-9, end_time.seconds + end_time.nanos * 1e-9)) return words # def parse_response(results): # """Parses response from google speech""" # words = [] # for idx, result in enumerate(results): # if result is None: # warnings.warn('No speech was decoded for segment %d' % (idx+1)) # words.append(None) # else: # try: # for segment in result: # for chunk in segment.transcript.split(' '): # if chunk != '': # words.append(str(chunk).upper()) # except: # warnings.warn('Error parsing response for segment %d' % (idx+1)) # # return words # MAIN ##################################################################### # initialize speech client if keypath: credentials = service_account.Credentials.from_service_account_file(keypath) scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/cloud-platform']) client = speech.SpeechClient(credentials=scoped_credentials) else: client = speech.SpeechClient() # make a list of files files = [] if path.endswith(".wav"): files = [path] else: listdirectory = os.listdir(path) for filename in listdirectory: if filename.endswith(".wav"): files.append(path + filename) # initialize list of words words = [] raw = [] # loop over files for i, f in enumerate(files): # print progress print('Decoding file ' + str(i+1) + ' of ' + str(len(files))) try: # start timer start = time.time() # decode 
file results = decode_file(f, client, speech_context, sample_rate, max_alternatives, enable_word_time_offsets) # parsing response parsed_results = parse_response(results) # save the processed file words.append(parsed_results) # save the raw response raw.append(results) if save: # save the raw response in a pickle pickle.dump(results, open(f + ".p", "wb" ) ) # save a text file with just the words pd.DataFrame(parsed_results).to_csv(f + '.txt', header=False, index=False) # print when finished print('Finished file ' + str(i+1) + ' of ' + str(len(files)) + ' in ' + str(round(time.time()-start,2)) + ' seconds.') # handle when something goes wrong except ValueError as e: words.append("Error") print(e) print('Decoding of file ' + str(i+1) + ' failed. Moving on to next file.') if return_raw: if len(words)>1: return raw else: return raw[0] else: if len(words)>1: return words else: return words[0]
python
def decode_speech(path, keypath=None, save=False, speech_context=None, sample_rate=44100, max_alternatives=1, language_code='en-US', enable_word_time_offsets=True, return_raw=False): """ Decode speech for a file or folder and return results This function wraps the Google Speech API and ffmpeg to decode speech for free recall experiments. Note: in order for this to work, you must have a Google Speech account, a google speech credentials file referenced in your _bash_profile, and ffmpeg installed on your computer. See our readthedocs for more information on how to set this up: http://cdl-quail.readthedocs.io/en/latest/. Parameters ---------- path : str Path to a wav file, or a folder of wav files. keypath : str Google Cloud Speech API key filepath. This is a JSON file containing credentials that was generated when creating a service account key. If None, assumes you have a local key that is set with an environment variable. See the speech decoding tutorial for details. save : boolean False by default, but if set to true, will save a pickle with the results object from google speech, and a text file with the decoded words. speech_context : list of str This allows you to give some context to the speech decoding algorithm. For example, this could be the words studied on a given list, or all words in an experiment. sample_rate : float The sample rate of your audio files (default is 44100). max_alternatives : int You can specify the speech decoding to return multiple guesses to the decoding. This will be saved in the results object (default is 1). language_code : str Decoding language code. Default is en-US. See here for more details: https://cloud.google.com/speech/docs/languages enable_word_time_offsets : bool Returns timing information (onsets/offsets) for each word (default is True). return_raw : boolean Instead of returning the parsed results objects (i.e. the words), you can return the raw response object. This has more details about the decoding, such as confidence. Returns ---------- words : list of str, or list of lists of str The results of the speech decoding. This will be a list if only one file is input, or a list of lists if more than one file is decoded. raw : google speech object, or list of objects You can optionally return the google speech object instead of the parsed results by using the return_raw flag.
""" # SUBFUNCTIONS def decode_file(file_path, client, speech_context, sample_rate, max_alternatives, enable_word_time_offsets): def recognize(chunk, file_path): """ Subfunction that loops over audio segments to recognize speech """ # export as flac chunk.export(file_path + ".flac", format = "flac", bitrate="44.1k") # open flac file with open(file_path + ".flac", 'rb') as sc: speech_content = sc.read() # initialize speech sample sample = types.RecognitionAudio(content=speech_content) # run speech decoding try: result = client.recognize(opts, sample) except ValueError as e: print(e) result = None return result opts = {} opts['encoding']=enums.RecognitionConfig.AudioEncoding.FLAC opts['language_code'] = language_code opts['sample_rate_hertz'] = sample_rate opts['max_alternatives'] = max_alternatives opts['enable_word_time_offsets'] = enable_word_time_offsets if speech_context: opts['speech_contexts']=[types.SpeechContext(phrases=speech_context)] # read in wav audio = AudioSegment.from_wav(file_path) # segment into 1 minute chunks if len(audio)>60000: segments = list(range(0,len(audio),60000)) if segments[-1]<len(audio): segments.append(len(audio)-1) print('Audio clip is longer than 1 minute. Splitting into %d one minute segments...' % (len(segments)-1)) audio_chunks = [] for i in range(len(segments)-1): audio_chunks.append(audio[segments[i]:segments[i+1]]) else: audio_chunks = [audio] # loop over audio segments results = [] for idx, chunk in enumerate(audio_chunks): results.append(recognize(chunk, file_path+str(idx))) # return list of results return results def parse_response(results): """Parses response from google speech""" words = [] for chunk in results: for result in chunk.results: alternative = result.alternatives[0] print('Transcript: {}'.format(alternative.transcript)) print('Confidence: {}'.format(alternative.confidence)) for word_info in alternative.words: word = word_info.word start_time = word_info.start_time end_time = word_info.end_time print('Word: {}, start_time: {}, end_time: {}'.format( word, start_time.seconds + start_time.nanos * 1e-9, end_time.seconds + end_time.nanos * 1e-9)) words.append((str(word).upper(), start_time.seconds + start_time.nanos * 1e-9, end_time.seconds + end_time.nanos * 1e-9)) return words # def parse_response(results): # """Parses response from google speech""" # words = [] # for idx, result in enumerate(results): # if result is None: # warnings.warn('No speech was decoded for segment %d' % (idx+1)) # words.append(None) # else: # try: # for segment in result: # for chunk in segment.transcript.split(' '): # if chunk != '': # words.append(str(chunk).upper()) # except: # warnings.warn('Error parsing response for segment %d' % (idx+1)) # # return words # MAIN ##################################################################### # initialize speech client if keypath: credentials = service_account.Credentials.from_service_account_file(keypath) scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/cloud-platform']) client = speech.SpeechClient(credentials=scoped_credentials) else: client = speech.SpeechClient() # make a list of files files = [] if path.endswith(".wav"): files = [path] else: listdirectory = os.listdir(path) for filename in listdirectory: if filename.endswith(".wav"): files.append(path + filename) # initialize list of words words = [] raw = [] # loop over files for i, f in enumerate(files): # print progress print('Decoding file ' + str(i+1) + ' of ' + str(len(files))) try: # start timer start = time.time() # decode 
file results = decode_file(f, client, speech_context, sample_rate, max_alternatives, enable_word_time_offsets) # parsing response parsed_results = parse_response(results) # save the processed file words.append(parsed_results) # save the raw response raw.append(results) if save: # save the raw response in a pickle pickle.dump(results, open(f + ".p", "wb" ) ) # save a text file with just the words pd.DataFrame(parsed_results).to_csv(f + '.txt', header=False, index=False) # print when finished print('Finished file ' + str(i+1) + ' of ' + str(len(files)) + ' in ' + str(round(time.time()-start,2)) + ' seconds.') # handle when something goes wrong except ValueError as e: words.append("Error") print(e) print('Decoding of file ' + str(i+1) + ' failed. Moving on to next file.') if return_raw: if len(words)>1: return raw else: return raw[0] else: if len(words)>1: return words else: return words[0]
[ "def", "decode_speech", "(", "path", ",", "keypath", "=", "None", ",", "save", "=", "False", ",", "speech_context", "=", "None", ",", "sample_rate", "=", "44100", ",", "max_alternatives", "=", "1", ",", "language_code", "=", "'en-US'", ",", "enable_word_time_offsets", "=", "True", ",", "return_raw", "=", "False", ")", ":", "# SUBFUNCTIONS", "def", "decode_file", "(", "file_path", ",", "client", ",", "speech_context", ",", "sample_rate", ",", "max_alternatives", ",", "enable_word_time_offsets", ")", ":", "def", "recognize", "(", "chunk", ",", "file_path", ")", ":", "\"\"\"\n Subfunction that loops over audio segments to recognize speech\n \"\"\"", "# export as flac", "chunk", ".", "export", "(", "file_path", "+", "\".flac\"", ",", "format", "=", "\"flac\"", ",", "bitrate", "=", "\"44.1k\"", ")", "# open flac file", "with", "open", "(", "file_path", "+", "\".flac\"", ",", "'rb'", ")", "as", "sc", ":", "speech_content", "=", "sc", ".", "read", "(", ")", "# initialize speech sample", "sample", "=", "types", ".", "RecognitionAudio", "(", "content", "=", "speech_content", ")", "# run speech decoding", "try", ":", "result", "=", "client", ".", "recognize", "(", "opts", ",", "sample", ")", "except", "ValueError", "as", "e", ":", "print", "(", "e", ")", "result", "=", "None", "return", "result", "opts", "=", "{", "}", "opts", "[", "'encoding'", "]", "=", "enums", ".", "RecognitionConfig", ".", "AudioEncoding", ".", "FLAC", "opts", "[", "'language_code'", "]", "=", "language_code", "opts", "[", "'sample_rate_hertz'", "]", "=", "sample_rate", "opts", "[", "'max_alternatives'", "]", "=", "max_alternatives", "opts", "[", "'enable_word_time_offsets'", "]", "=", "enable_word_time_offsets", "if", "speech_context", ":", "opts", "[", "'speech_contexts'", "]", "=", "[", "types", ".", "SpeechContext", "(", "phrases", "=", "speech_context", ")", "]", "# read in wav", "audio", "=", "AudioSegment", ".", "from_wav", "(", "file_path", ")", "# segment into 1 minute chunks", "if", "len", "(", "audio", ")", ">", "60000", ":", "segments", "=", "list", "(", "range", "(", "0", ",", "len", "(", "audio", ")", ",", "60000", ")", ")", "if", "segments", "[", "-", "1", "]", "<", "len", "(", "audio", ")", ":", "segments", ".", "append", "(", "len", "(", "audio", ")", "-", "1", ")", "print", "(", "'Audio clip is longer than 1 minute. 
Splitting into %d one minute segments...'", "%", "(", "len", "(", "segments", ")", "-", "1", ")", ")", "audio_chunks", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "segments", ")", "-", "1", ")", ":", "audio_chunks", ".", "append", "(", "audio", "[", "segments", "[", "i", "]", ":", "segments", "[", "i", "+", "1", "]", "]", ")", "else", ":", "audio_chunks", "=", "[", "audio", "]", "# loop over audio segments", "results", "=", "[", "]", "for", "idx", ",", "chunk", "in", "enumerate", "(", "audio_chunks", ")", ":", "results", ".", "append", "(", "recognize", "(", "chunk", ",", "file_path", "+", "str", "(", "idx", ")", ")", ")", "# return list of results", "return", "results", "def", "parse_response", "(", "results", ")", ":", "\"\"\"Parses response from google speech\"\"\"", "words", "=", "[", "]", "for", "chunk", "in", "results", ":", "for", "result", "in", "chunk", ".", "results", ":", "alternative", "=", "result", ".", "alternatives", "[", "0", "]", "print", "(", "'Transcript: {}'", ".", "format", "(", "alternative", ".", "transcript", ")", ")", "print", "(", "'Confidence: {}'", ".", "format", "(", "alternative", ".", "confidence", ")", ")", "for", "word_info", "in", "alternative", ".", "words", ":", "word", "=", "word_info", ".", "word", "start_time", "=", "word_info", ".", "start_time", "end_time", "=", "word_info", ".", "end_time", "print", "(", "'Word: {}, start_time: {}, end_time: {}'", ".", "format", "(", "word", ",", "start_time", ".", "seconds", "+", "start_time", ".", "nanos", "*", "1e-9", ",", "end_time", ".", "seconds", "+", "end_time", ".", "nanos", "*", "1e-9", ")", ")", "words", ".", "append", "(", "(", "str", "(", "word", ")", ".", "upper", "(", ")", ",", "start_time", ".", "seconds", "+", "start_time", ".", "nanos", "*", "1e-9", ",", "end_time", ".", "seconds", "+", "end_time", ".", "nanos", "*", "1e-9", ")", ")", "return", "words", "# def parse_response(results):", "# \"\"\"Parses response from google speech\"\"\"", "# words = []", "# for idx, result in enumerate(results):", "# if result is None:", "# warnings.warn('No speech was decoded for segment %d' % (idx+1))", "# words.append(None)", "# else:", "# try:", "# for segment in result:", "# for chunk in segment.transcript.split(' '):", "# if chunk != '':", "# words.append(str(chunk).upper())", "# except:", "# warnings.warn('Error parsing response for segment %d' % (idx+1))", "#", "# return words", "# MAIN #####################################################################", "# initialize speech client", "if", "keypath", ":", "credentials", "=", "service_account", ".", "Credentials", ".", "from_service_account_file", "(", "keypath", ")", "scoped_credentials", "=", "credentials", ".", "with_scopes", "(", "[", "'https://www.googleapis.com/auth/cloud-platform'", "]", ")", "client", "=", "speech", ".", "SpeechClient", "(", "credentials", "=", "scoped_credentials", ")", "else", ":", "client", "=", "speech", ".", "SpeechClient", "(", ")", "# make a list of files", "files", "=", "[", "]", "if", "path", ".", "endswith", "(", "\".wav\"", ")", ":", "files", "=", "[", "path", "]", "else", ":", "listdirectory", "=", "os", ".", "listdir", "(", "path", ")", "for", "filename", "in", "listdirectory", ":", "if", "filename", ".", "endswith", "(", "\".wav\"", ")", ":", "files", ".", "append", "(", "path", "+", "filename", ")", "# initialize list of words", "words", "=", "[", "]", "raw", "=", "[", "]", "# loop over files", "for", "i", ",", "f", "in", "enumerate", "(", "files", ")", ":", "# print progress", "print", "(", 
"'Decoding file '", "+", "str", "(", "i", "+", "1", ")", "+", "' of '", "+", "str", "(", "len", "(", "files", ")", ")", ")", "try", ":", "# start timer", "start", "=", "time", ".", "time", "(", ")", "# decode file", "results", "=", "decode_file", "(", "f", ",", "client", ",", "speech_context", ",", "sample_rate", ",", "max_alternatives", ",", "enable_word_time_offsets", ")", "# parsing response", "parsed_results", "=", "parse_response", "(", "results", ")", "# save the processed file", "words", ".", "append", "(", "parsed_results", ")", "# save the processed file", "raw", ".", "append", "(", "results", ")", "if", "save", ":", "# save the raw response in a pickle", "pickle", ".", "dump", "(", "results", ",", "open", "(", "f", "+", "\".p\"", ",", "\"wb\"", ")", ")", "# save a text file with just the words", "pd", ".", "DataFrame", "(", "parsed_results", ")", ".", "to_csv", "(", "f", "+", "'.txt'", ",", "header", "=", "False", ",", "index", "=", "False", ")", "# print when finished", "print", "(", "'Finished file '", "+", "str", "(", "i", "+", "1", ")", "+", "' of '", "+", "str", "(", "len", "(", "files", ")", ")", "+", "' in '", "+", "str", "(", "round", "(", "time", ".", "time", "(", ")", "-", "start", ",", "2", ")", ")", "+", "' seconds.'", ")", "# handle when something goes wrong", "except", "ValueError", "as", "e", ":", "words", ".", "append", "(", "\"Error\"", ")", "print", "(", "e", ")", "print", "(", "'Decoding of file '", "+", "str", "(", "i", ")", "+", "'failed. Moving on to next file.'", ")", "if", "return_raw", ":", "if", "len", "(", "words", ")", ">", "1", ":", "return", "raw", "else", ":", "return", "raw", "[", "0", "]", "else", ":", "if", "len", "(", "words", ")", ">", "1", ":", "return", "words", "else", ":", "return", "words", "[", "0", "]" ]
Decode speech for a file or folder and return results This function wraps the Google Speech API and ffmpeg to decode speech for free recall experiments. Note: in order for this to work, you must have a Google Speech account, a google speech credentials file referenced in your _bash_profile, and ffmpeg installed on your computer. See our readthedocs for more information on how to set this up: http://cdl-quail.readthedocs.io/en/latest/. Parameters ---------- path : str Path to a wav file, or a folder of wav files. keypath : str Google Cloud Speech API key filepath. This is a JSON file containing credentials that was generated when creating a service account key. If None, assumes you have a local key that is set with an environment variable. See the speech decoding tutorial for details. save : boolean False by default, but if set to true, will save a pickle with the results object from google speech, and a text file with the decoded words. speech_context : list of str This allows you to give some context to the speech decoding algorithm. For example, this could be the words studied on a given list, or all words in an experiment. sample_rate : float The sample rate of your audio files (default is 44100). max_alternatives : int You can specify the speech decoding to return multiple guesses to the decoding. This will be saved in the results object (default is 1). language_code : str Decoding language code. Default is en-US. See here for more details: https://cloud.google.com/speech/docs/languages enable_word_time_offsets : bool Returns timing information (onsets/offsets) for each word (default is True). return_raw : boolean Instead of returning the parsed results objects (i.e. the words), you can return the raw response object. This has more details about the decoding, such as confidence. Returns ---------- words : list of str, or list of lists of str The results of the speech decoding. This will be a list if only one file is input, or a list of lists if more than one file is decoded. raw : google speech object, or list of objects You can optionally return the google speech object instead of the parsed results by using the return_raw flag.
[ "Decode", "speech", "for", "a", "file", "or", "folder", "and", "return", "results" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/decode_speech.py#L24-L265
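A hedged usage sketch for decode_speech; the wav path, credentials file, and study words below are placeholders:

from quail.decode_speech import decode_speech

words = decode_speech('recall1.wav',
                      keypath='google-credentials.json',       # service account JSON (placeholder)
                      speech_context=['CAT', 'DOG', 'HOUSE'],  # bias decoding toward the list words
                      save=True)                               # also writes .p and .txt files
# With enable_word_time_offsets=True (the default), each entry is a
# (WORD, onset_seconds, offset_seconds) tuple.
print(words)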
aouyar/PyMunin
pysysinfo/squid.py
parse_value
def parse_value(val): """Parse input string and return int, float or str depending on format. @param val: Input string. @return: Value of type int, float or str. """ mobj = re.match('(-{0,1}\d+)\s*(\sseconds|/\s*\w+)$', val) if mobj: return int(mobj.group(1)) mobj = re.match('(-{0,1}\d*\.\d+)\s*(\sseconds|/\s*\w+)$', val) if mobj: return float(mobj.group(1)) mobj = re.match('(-{0,1}\d+)\s*([GMK])B$', val) if mobj: return int(mobj.group(1)) * memMultiplier[mobj.group(2)] mobj = re.match('(-{0,1}\d+(\.\d+){0,1})\s*\%$', val) if mobj: return float(mobj.group(1)) / 100 return val
python
def parse_value(val): """Parse input string and return int, float or str depending on format. @param val: Input string. @return: Value of type int, float or str. """ mobj = re.match('(-{0,1}\d+)\s*(\sseconds|/\s*\w+)$', val) if mobj: return int(mobj.group(1)) mobj = re.match('(-{0,1}\d*\.\d+)\s*(\sseconds|/\s*\w+)$', val) if mobj: return float(mobj.group(1)) mobj = re.match('(-{0,1}\d+)\s*([GMK])B$', val) if mobj: return int(mobj.group(1)) * memMultiplier[mobj.group(2)] mobj = re.match('(-{0,1}\d+(\.\d+){0,1})\s*\%$', val) if mobj: return float(mobj.group(1)) / 100 return val
[ "def", "parse_value", "(", "val", ")", ":", "mobj", "=", "re", ".", "match", "(", "'(-{0,1}\\d+)\\s*(\\sseconds|/\\s*\\w+)$'", ",", "val", ")", "if", "mobj", ":", "return", "int", "(", "mobj", ".", "group", "(", "1", ")", ")", "mobj", "=", "re", ".", "match", "(", "'(-{0,1}\\d*\\.\\d+)\\s*(\\sseconds|/\\s*\\w+)$'", ",", "val", ")", "if", "mobj", ":", "return", "float", "(", "mobj", ".", "group", "(", "1", ")", ")", "re", ".", "match", "(", "'(-{0,1}\\d+)\\s*([GMK])B$'", ",", "val", ")", "if", "mobj", ":", "return", "int", "(", "mobj", ".", "group", "(", "1", ")", ")", "*", "memMultiplier", "[", "mobj", ".", "group", "(", "2", ")", "]", "mobj", "=", "re", ".", "match", "(", "'(-{0,1}\\d+(\\.\\d+){0,1})\\s*\\%$'", ",", "val", ")", "if", "mobj", ":", "return", "float", "(", "mobj", ".", "group", "(", "1", ")", ")", "/", "100", "return", "val" ]
Parse input string and return int, float or str depending on format. @param val: Input string. @return: Value of type int, float or str.
[ "Parse", "input", "string", "and", "return", "int", "float", "or", "str", "depending", "on", "format", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L31-L51
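Illustrative inputs and outputs for the four formats the regexes above target; the memMultiplier mapping is defined elsewhere in squid.py and its exact values are an assumption here:

from pysysinfo.squid import parse_value  # module path inferred from the record URL

parse_value('1234 seconds')  # -> 1234 (int)
parse_value('0.75/sec')      # -> 0.75 (float); rate strings like 'n/sec' hit the second pattern
parse_value('512 MB')        # -> 512 * memMultiplier['M'] (int), assuming 'M' maps to 1024**2
parse_value('12.5 %')        # -> 0.125 (float; percentages come back as fractions of one)
parse_value('enabled')       # -> 'enabled' (unmatched strings pass through unchanged)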
aouyar/PyMunin
pysysinfo/squid.py
SquidInfo._connect
def _connect(self): """Connect to Squid Proxy Manager interface.""" if sys.version_info[:2] < (2,6): self._conn = httplib.HTTPConnection(self._host, self._port) else: self._conn = httplib.HTTPConnection(self._host, self._port, False, defaultTimeout)
python
def _connect(self): """Connect to Squid Proxy Manager interface.""" if sys.version_info[:2] < (2,6): self._conn = httplib.HTTPConnection(self._host, self._port) else: self._conn = httplib.HTTPConnection(self._host, self._port, False, defaultTimeout)
[ "def", "_connect", "(", "self", ")", ":", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<", "(", "2", ",", "6", ")", ":", "self", ".", "_conn", "=", "httplib", ".", "HTTPConnection", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", "else", ":", "self", ".", "_conn", "=", "httplib", ".", "HTTPConnection", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "False", ",", "defaultTimeout", ")" ]
Connect to Squid Proxy Manager interface.
[ "Connect", "to", "Squid", "Proxy", "Manager", "interface", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L90-L96
aouyar/PyMunin
pysysinfo/squid.py
SquidInfo._retrieve
def _retrieve(self, map): """Query Squid Proxy Server Manager Interface for stats. @param map: Statistics map name. @return: Dictionary of query results. """ self._conn.request('GET', "cache_object://%s/%s" % (self._host, map), None, self._httpHeaders) rp = self._conn.getresponse() if rp.status == 200: data = rp.read() return data else: raise Exception("Retrieval of stats from Squid Proxy Server " "on host %s and port %s failed.\n" "HTTP - Status: %s Reason: %s" % (self._host, self._port, rp.status, rp.reason))
python
def _retrieve(self, map): """Query Squid Proxy Server Manager Interface for stats. @param map: Statistics map name. @return: Dictionary of query results. """ self._conn.request('GET', "cache_object://%s/%s" % (self._host, map), None, self._httpHeaders) rp = self._conn.getresponse() if rp.status == 200: data = rp.read() return data else: raise Exception("Retrieval of stats from Squid Proxy Server " "on host %s and port %s failed.\n" "HTTP - Status: %s Reason: %s" % (self._host, self._port, rp.status, rp.reason))
[ "def", "_retrieve", "(", "self", ",", "map", ")", ":", "self", ".", "_conn", ".", "request", "(", "'GET'", ",", "\"cache_object://%s/%s\"", "%", "(", "self", ".", "_host", ",", "map", ")", ",", "None", ",", "self", ".", "_httpHeaders", ")", "rp", "=", "self", ".", "_conn", ".", "getresponse", "(", ")", "if", "rp", ".", "status", "==", "200", ":", "data", "=", "rp", ".", "read", "(", ")", "return", "data", "else", ":", "raise", "Exception", "(", "\"Retrieval of stats from Squid Proxy Server\"", "\"on host %s and port %s failed.\\n\"", "\"HTTP - Status: %s Reason: %s\"", "%", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "rp", ".", "status", ",", "rp", ".", "reason", ")", ")" ]
Query Squid Proxy Server Manager Interface for stats. @param map: Statistics map name. @return: Dictionary of query results.
[ "Query", "Squid", "Proxy", "Server", "Manager", "Interface", "for", "stats", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L98-L115
aouyar/PyMunin
pysysinfo/squid.py
SquidInfo._parseCounters
def _parseCounters(self, data): """Parse simple stats list of key, value pairs. @param data: Multiline data with one key-value pair in each line. @return: Dictionary of stats. """ info_dict = util.NestedDict() for line in data.splitlines(): mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line) if mobj: (key, value) = mobj.groups() klist = key.split('.') info_dict.set_nested(klist, parse_value(value)) return info_dict
python
def _parseCounters(self, data): """Parse simple stats list of key, value pairs. @param data: Multiline data with one key-value pair in each line. @return: Dictionary of stats. """ info_dict = util.NestedDict() for line in data.splitlines(): mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line) if mobj: (key, value) = mobj.groups() klist = key.split('.') info_dict.set_nested(klist, parse_value(value)) return info_dict
[ "def", "_parseCounters", "(", "self", ",", "data", ")", ":", "info_dict", "=", "util", ".", "NestedDict", "(", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'^\\s*([\\w\\.]+)\\s*=\\s*(\\S.*)$'", ",", "line", ")", "if", "mobj", ":", "(", "key", ",", "value", ")", "=", "mobj", ".", "groups", "(", ")", "klist", "=", "key", ".", "split", "(", "'.'", ")", "info_dict", ".", "set_nested", "(", "klist", ",", "parse_value", "(", "value", ")", ")", "return", "info_dict" ]
Parse simple stats list of key, value pairs. @param data: Multiline data with one key-value pair in each line. @return: Dictionary of stats.
[ "Parse", "simple", "stats", "list", "of", "key", "value", "pairs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L117-L131
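An illustrative input for _parseCounters and the nesting it produces; the counter names are typical of Squid's 'counters' page but are placeholders here:

data = ("client_http.requests = 125\n"
        "client_http.hits = 100\n"
        "server.all.kbytes_in = 2048\n")
# Fed to _parseCounters, each dotted key is split and nested along its parts:
# {'client_http': {'requests': 125, 'hits': 100},
#  'server': {'all': {'kbytes_in': 2048}}}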
aouyar/PyMunin
pysysinfo/squid.py
SquidInfo._parseSections
def _parseSections(self, data): """Parse data and separate sections. Returns dictionary that maps section name to section data. @param data: Multiline data. @return: Dictionary that maps section names to section data. """ section_dict = {} lines = data.splitlines() idx = 0 numlines = len(lines) section = None while idx < numlines: line = lines[idx] idx += 1 mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:$', line) if mobj: section = mobj.group(1) section_dict[section] = [] else: mobj = re.match('(\t|\s)\s*(\w.*)$', line) if mobj: section_dict[section].append(mobj.group(2)) else: mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:\s*(\S.*)$', line) if mobj: section = None if not section_dict.has_key(section): section_dict[section] = [] section_dict[section].append(line) else: if not section_dict.has_key('PARSEERROR'): section_dict['PARSEERROR'] = [] section_dict['PARSEERROR'].append(line) return section_dict
python
def _parseSections(self, data): """Parse data and separate sections. Returns dictionary that maps section name to section data. @param data: Multiline data. @return: Dictionary that maps section names to section data. """ section_dict = {} lines = data.splitlines() idx = 0 numlines = len(lines) section = None while idx < numlines: line = lines[idx] idx += 1 mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:$', line) if mobj: section = mobj.group(1) section_dict[section] = [] else: mobj = re.match('(\t|\s)\s*(\w.*)$', line) if mobj: section_dict[section].append(mobj.group(2)) else: mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:\s*(\S.*)$', line) if mobj: section = None if not section_dict.has_key(section): section_dict[section] = [] section_dict[section].append(line) else: if not section_dict.has_key('PARSEERROR'): section_dict['PARSEERROR'] = [] section_dict['PARSEERROR'].append(line) return section_dict
[ "def", "_parseSections", "(", "self", ",", "data", ")", ":", "section_dict", "=", "{", "}", "lines", "=", "data", ".", "splitlines", "(", ")", "idx", "=", "0", "numlines", "=", "len", "(", "lines", ")", "section", "=", "None", "while", "idx", "<", "numlines", ":", "line", "=", "lines", "[", "idx", "]", "idx", "+=", "1", "mobj", "=", "re", ".", "match", "(", "'^(\\w[\\w\\s\\(\\)]+[\\w\\)])\\s*:$'", ",", "line", ")", "if", "mobj", ":", "section", "=", "mobj", ".", "group", "(", "1", ")", "section_dict", "[", "section", "]", "=", "[", "]", "else", ":", "mobj", "=", "re", ".", "match", "(", "'(\\t|\\s)\\s*(\\w.*)$'", ",", "line", ")", "if", "mobj", ":", "section_dict", "[", "section", "]", ".", "append", "(", "mobj", ".", "group", "(", "2", ")", ")", "else", ":", "mobj", "=", "re", ".", "match", "(", "'^(\\w[\\w\\s\\(\\)]+[\\w\\)])\\s*:\\s*(\\S.*)$'", ",", "line", ")", "if", "mobj", ":", "section", "=", "None", "if", "not", "section_dict", ".", "has_key", "(", "section", ")", ":", "section_dict", "[", "section", "]", "=", "[", "]", "section_dict", "[", "section", "]", ".", "append", "(", "line", ")", "else", ":", "if", "not", "section_dict", ".", "has_key", "(", "'PARSEERROR'", ")", ":", "section_dict", "[", "'PARSEERROR'", "]", "=", "[", "]", "section_dict", "[", "'PARSEERROR'", "]", ".", "append", "(", "line", ")", "return", "section_dict" ]
Parse data and separate sections. Returns dictionary that maps section name to section data. @param data: Multiline data. @return: Dictionary that maps section names to section data.
[ "Parse", "data", "and", "separate", "sections", ".", "Returns", "dictionary", "that", "maps", "section", "name", "to", "section", "data", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L133-L168
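An illustrative fragment of manager-interface output and how _parseSections groups it; the section names and figures are made up:

data = ("Connection information for squid:\n"
        "\tNumber of clients accessing cache:\t10\n"
        "\tNumber of HTTP requests received:\t1250\n"
        "Cache information for squid:\n"
        "\tRequest Hit Ratios:\t5min: 40.0%, 60min: 42.0%\n")
# Lines ending in ':' open a new section; indented lines are collected under it:
# {'Connection information for squid': ['Number of clients accessing cache:\t10', ...],
#  'Cache information for squid': ['Request Hit Ratios:\t5min: 40.0%, 60min: 42.0%']}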
aouyar/PyMunin
pysysinfo/squid.py
SquidInfo.getMenu
def getMenu(self): """Get manager interface section list from Squid Proxy Server @return: List of tuples (section, description, type) """ data = self._retrieve('') info_list = [] for line in data.splitlines(): mobj = re.match('^\s*(\S.*\S)\s*\t\s*(\S.*\S)\s*\t\s*(\S.*\S)$', line) if mobj: info_list.append(mobj.groups()) return info_list
python
def getMenu(self): """Get manager interface section list from Squid Proxy Server @return: List of tuples (section, description, type) """ data = self._retrieve('') info_list = [] for line in data.splitlines(): mobj = re.match('^\s*(\S.*\S)\s*\t\s*(\S.*\S)\s*\t\s*(\S.*\S)$', line) if mobj: info_list.append(mobj.groups()) return info_list
[ "def", "getMenu", "(", "self", ")", ":", "data", "=", "self", ".", "_retrieve", "(", "''", ")", "info_list", "=", "[", "]", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'^\\s*(\\S.*\\S)\\s*\\t\\s*(\\S.*\\S)\\s*\\t\\s*(\\S.*\\S)$'", ",", "line", ")", "if", "mobj", ":", "info_list", ".", "append", "(", "mobj", ".", "groups", "(", ")", ")", "return", "info_list" ]
Get manager interface section list from Squid Proxy Server @return: List of tuples (section, description, type)
[ "Get", "manager", "interface", "section", "list", "from", "Squid", "Proxy", "Server" ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/squid.py#L170-L182
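A sketch of listing the manager index; constructing SquidInfo with defaults is an assumption (the constructor is not shown in this record), and the three-tuple layout follows the docstring above:

squid = SquidInfo()  # host/port defaults assumed
for name, description, access in squid.getMenu():
    print('%-16s %-44s %s' % (name, description, access))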
aouyar/PyMunin
pysysinfo/wanpipe.py
WanpipeInfo.getIfaceStats
def getIfaceStats(self): """Return dictionary of Traffic Stats for each Wanpipe Interface. @return: Nested dictionary of statistics for each interface. """ ifInfo = netiface.NetIfaceInfo() ifStats = ifInfo.getIfStats() info_dict = {} for ifname in ifStats: if re.match('^w\d+g\d+$', ifname): info_dict[ifname] = ifStats[ifname] return info_dict
python
def getIfaceStats(self): """Return dictionary of Traffic Stats for each Wanpipe Interface. @return: Nested dictionary of statistics for each interface. """ ifInfo = netiface.NetIfaceInfo() ifStats = ifInfo.getIfStats() info_dict = {} for ifname in ifStats: if re.match('^w\d+g\d+$', ifname): info_dict[ifname] = ifStats[ifname] return info_dict
[ "def", "getIfaceStats", "(", "self", ")", ":", "ifInfo", "=", "netiface", ".", "NetIfaceInfo", "(", ")", "ifStats", "=", "ifInfo", ".", "getIfStats", "(", ")", "info_dict", "=", "{", "}", "for", "ifname", "in", "ifStats", ":", "if", "re", ".", "match", "(", "'^w\\d+g\\d+$'", ",", "ifname", ")", ":", "info_dict", "[", "ifname", "]", "=", "ifStats", "[", "ifname", "]", "return", "info_dict" ]
Return dictionary of Traffic Stats for each Wanpipe Interface. @return: Nested dictionary of statistics for each interface.
[ "Return", "dictionary", "of", "Traffic", "Stats", "for", "each", "Wanpipe", "Interface", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/wanpipe.py#L27-L39
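A usage sketch; interface names are filtered by the ^w\d+g\d+$ pattern above, so the keys look like 'w1g1':

from pysysinfo.wanpipe import WanpipeInfo

wp = WanpipeInfo()
for ifname, counters in wp.getIfaceStats().items():
    print(ifname, counters)  # only wanpipe-style names such as 'w1g1' survive the filter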
aouyar/PyMunin
pysysinfo/wanpipe.py
WanpipeInfo.getPRIstats
def getPRIstats(self, iface): """Return ISDN Operational Stats for interface. @param iface: Interface name. (Ex. w1g1) @return: Nested dictionary of statistics for interface. """ info_dict = {} output = util.exec_command([wanpipemonCmd, '-i', iface, '-c', 'Ta']) for line in output.splitlines(): mobj = re.match('^\s*(Line Code Violation|Far End Block Errors|' 'CRC4 Errors|FAS Errors)\s*:\s*(\d+)\s*$', line, re.IGNORECASE) if mobj: info_dict[mobj.group(1).lower().replace(' ', '')] = int(mobj.group(2)) continue mobj = re.match('^\s*(Rx Level)\s*:\s*>{0,1}\s*([-\d\.]+)db\s*', line, re.IGNORECASE) if mobj: info_dict[mobj.group(1).lower().replace(' ', '')] = float(mobj.group(2)) continue return info_dict
python
def getPRIstats(self, iface): """Return ISDN Operational Stats for interface. @param iface: Interface name. (Ex. w1g1) @return: Nested dictionary of statistics for interface. """ info_dict = {} output = util.exec_command([wanpipemonCmd, '-i', iface, '-c', 'Ta']) for line in output.splitlines(): mobj = re.match('^\s*(Line Code Violation|Far End Block Errors|' 'CRC4 Errors|FAS Errors)\s*:\s*(\d+)\s*$', line, re.IGNORECASE) if mobj: info_dict[mobj.group(1).lower().replace(' ', '')] = int(mobj.group(2)) continue mobj = re.match('^\s*(Rx Level)\s*:\s*>{0,1}\s*([-\d\.]+)db\s*', line, re.IGNORECASE) if mobj: info_dict[mobj.group(1).lower().replace(' ', '')] = float(mobj.group(2)) continue return info_dict
[ "def", "getPRIstats", "(", "self", ",", "iface", ")", ":", "info_dict", "=", "{", "}", "output", "=", "util", ".", "exec_command", "(", "[", "wanpipemonCmd", ",", "'-i'", ",", "iface", ",", "'-c'", ",", "'Ta'", "]", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'^\\s*(Line Code Violation|Far End Block Errors|'", "'CRC4 Errors|FAS Errors)\\s*:\\s*(\\d+)\\s*$'", ",", "line", ",", "re", ".", "IGNORECASE", ")", "if", "mobj", ":", "info_dict", "[", "mobj", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "''", ")", "]", "=", "int", "(", "mobj", ".", "group", "(", "2", ")", ")", "continue", "mobj", "=", "re", ".", "match", "(", "'^\\s*(Rx Level)\\s*:\\s*>{0,1}\\s*([-\\d\\.]+)db\\s*'", ",", "line", ",", "re", ".", "IGNORECASE", ")", "if", "mobj", ":", "info_dict", "[", "mobj", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "''", ")", "]", "=", "float", "(", "mobj", ".", "group", "(", "2", ")", ")", "continue", "return", "info_dict" ]
Return ISDN Operational Stats for interface. @param iface: Interface name. (Ex. w1g1) @return: Nested dictionary of statistics for interface.
[ "Return", "RDSI", "Operational", "Stats", "for", "interface", ".", "@param", "iface", ":", "Interface", "name", ".", "(", "Ex", ".", "w1g1", ")", "@return", ":", "Nested", "dictionary", "of", "statistics", "for", "interface", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/wanpipe.py#L41-L62
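A usage sketch; the dict keys come from lowercasing the matched labels and stripping spaces, and 'w1g1' is an example interface name:

from pysysinfo.wanpipe import WanpipeInfo

wp = WanpipeInfo()
pri = wp.getPRIstats('w1g1')
# Keys derive from the matched labels: 'linecodeviolation', 'crc4errors', 'rxlevel', etc.
print(pri.get('linecodeviolation'), pri.get('rxlevel'))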
aouyar/PyMunin
pysysinfo/freeswitch.py
FSinfo._connect
def _connect(self): """Connect to FreeSWITCH ESL Interface.""" try: self._eslconn = ESL.ESLconnection(self._eslhost, str(self._eslport), self._eslpass) except: pass if not self._eslconn.connected(): raise Exception( "Connection to FreeSWITCH ESL Interface on host %s and port %d failed." % (self._eslhost, self._eslport) )
python
def _connect(self): """Connect to FreeSWITCH ESL Interface.""" try: self._eslconn = ESL.ESLconnection(self._eslhost, str(self._eslport), self._eslpass) except: pass if not self._eslconn.connected(): raise Exception( "Connection to FreeSWITCH ESL Interface on host %s and port %d failed." % (self._eslhost, self._eslport) )
[ "def", "_connect", "(", "self", ")", ":", "try", ":", "self", ".", "_eslconn", "=", "ESL", ".", "ESLconnection", "(", "self", ".", "_eslhost", ",", "str", "(", "self", ".", "_eslport", ")", ",", "self", ".", "_eslpass", ")", "except", ":", "pass", "if", "not", "self", ".", "_eslconn", ".", "connected", "(", ")", ":", "raise", "Exception", "(", "\"Connection to FreeSWITCH ESL Interface on host %s and port %d failed.\"", "%", "(", "self", ".", "_eslhost", ",", "self", ".", "_eslport", ")", ")" ]
Connect to FreeSWITCH ESL Interface.
[ "Connect", "to", "FreeSWITCH", "ESL", "Interface", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/freeswitch.py#L60-L72
aouyar/PyMunin
pysysinfo/freeswitch.py
FSinfo._execCmd
def _execCmd(self, cmd, args): """Execute command and return result body as list of lines. @param cmd: Command string. @param args: Command arguments string. @return: Result body as list of lines, or None. """ output = self._eslconn.api(cmd, args) if output: body = output.getBody() if body: return body.splitlines() return None
python
def _execCmd(self, cmd, args): """Execute command and return result body as list of lines. @param cmd: Command string. @param args: Command arguments string. @return: Result body as list of lines, or None. """ output = self._eslconn.api(cmd, args) if output: body = output.getBody() if body: return body.splitlines() return None
[ "def", "_execCmd", "(", "self", ",", "cmd", ",", "args", ")", ":", "output", "=", "self", ".", "_eslconn", ".", "api", "(", "cmd", ",", "args", ")", "if", "output", ":", "body", "=", "output", ".", "getBody", "(", ")", "if", "body", ":", "return", "body", ".", "splitlines", "(", ")", "return", "None" ]
Execute command and return result body as list of lines. @param cmd: Command string. @param args: Command arguments string. @return: Result body as list of lines, or None.
[ "Execute", "command", "and", "return", "result", "body", "as", "list", "of", "lines", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/freeswitch.py#L74-L87
aouyar/PyMunin
pysysinfo/freeswitch.py
FSinfo._execShowCmd
def _execShowCmd(self, showcmd): """Execute 'show' command and return result dictionary. @param showcmd: Command string for 'show'. @return: Result dictionary, or None. """ result = None lines = self._execCmd("show", showcmd) if lines and len(lines) >= 2 and lines[0] != '' and lines[0][0] != '-': result = {} result['keys'] = lines[0].split(',') items = [] for line in lines[1:]: if line == '': break items.append(line.split(',')) result['items'] = items return result
python
def _execShowCmd(self, showcmd): """Execute 'show' command and return result dictionary. @param showcmd: Command string for 'show'. @return: Result dictionary, or None. """ result = None lines = self._execCmd("show", showcmd) if lines and len(lines) >= 2 and lines[0] != '' and lines[0][0] != '-': result = {} result['keys'] = lines[0].split(',') items = [] for line in lines[1:]: if line == '': break items.append(line.split(',')) result['items'] = items return result
[ "def", "_execShowCmd", "(", "self", ",", "showcmd", ")", ":", "result", "=", "None", "lines", "=", "self", ".", "_execCmd", "(", "\"show\"", ",", "showcmd", ")", "if", "lines", "and", "len", "(", "lines", ")", ">=", "2", "and", "lines", "[", "0", "]", "!=", "''", "and", "lines", "[", "0", "]", "[", "0", "]", "!=", "'-'", ":", "result", "=", "{", "}", "result", "[", "'keys'", "]", "=", "lines", "[", "0", "]", ".", "split", "(", "','", ")", "items", "=", "[", "]", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "if", "line", "==", "''", ":", "break", "items", ".", "append", "(", "line", ".", "split", "(", "','", ")", ")", "result", "[", "'items'", "]", "=", "items", "return", "result" ]
Execute 'show' command and return result dictionary. @param showcmd: Command string for 'show'. @return: Result dictionary, or None.
[ "Execute", "show", "command", "and", "return", "result", "dictionary", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/freeswitch.py#L89-L107
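A sketch of consuming the parsed table; constructing FSinfo with defaults and the presence of a 'uuid' column in the 'show channels' output are assumptions, not facts from this record:

fs = FSinfo()  # ESL host/port/password defaults assumed
table = fs._execShowCmd('channels')
if table:
    uuid_col = table['keys'].index('uuid')  # hypothetical column name
    for row in table['items']:
        print(row[uuid_col])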
aouyar/PyMunin
pysysinfo/freeswitch.py
FSinfo._execShowCountCmd
def _execShowCountCmd(self, showcmd): """Execute 'show' command and return result count. @param showcmd: Command string for 'show'. @return: Total count as integer, or None. """ result = None lines = self._execCmd("show", showcmd + " count") for line in lines or []: mobj = re.match('\s*(\d+)\s+total', line) if mobj: return int(mobj.group(1)) return result
python
def _execShowCountCmd(self, showcmd): """Execute 'show' command and return result count. @param showcmd: Command string for 'show'. @return: Total count as integer, or None. """ result = None lines = self._execCmd("show", showcmd + " count") for line in lines or []: mobj = re.match('\s*(\d+)\s+total', line) if mobj: return int(mobj.group(1)) return result
[ "def", "_execShowCountCmd", "(", "self", ",", "showcmd", ")", ":", "result", "=", "None", "lines", "=", "self", ".", "_execCmd", "(", "\"show\"", ",", "showcmd", "+", "\" count\"", ")", "for", "line", "in", "lines", ":", "mobj", "=", "re", ".", "match", "(", "'\\s*(\\d+)\\s+total'", ",", "line", ")", "if", "mobj", ":", "return", "int", "(", "mobj", ".", "group", "(", "1", ")", ")", "return", "result" ]
Execute 'show' command and return result count. @param showcmd: Command string for 'show'. @return: Total count as integer, or None.
[ "Execute", "show", "command", "and", "return", "result", "dictionary", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/freeswitch.py#L109-L122
aouyar/PyMunin
pymunin/plugins/asteriskstats.py
MuninAsteriskPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('asterisk_calls') or self.hasGraph('asterisk_channels'): stats = self._ami.getChannelStats(self._chanList) if self.hasGraph('asterisk_calls') and stats: self.setGraphVal('asterisk_calls', 'active_calls', stats.get('active_calls')) self.setGraphVal('asterisk_calls', 'calls_per_min', stats.get('calls_processed')) if self.hasGraph('asterisk_channels') and stats: for field in self._chanList: self.setGraphVal('asterisk_channels', field, stats.get(field)) if 'dahdi' in self._chanList: self.setGraphVal('asterisk_channels', 'mix', stats.get('mix')) if self.hasGraph('asterisk_peers_sip'): stats = self._ami.getPeerStats('sip') if stats: for field in ('online', 'unmonitored', 'unreachable', 'lagged', 'unknown'): self.setGraphVal('asterisk_peers_sip', field, stats.get(field)) if self.hasGraph('asterisk_peers_iax2'): stats = self._ami.getPeerStats('iax2') if stats: for field in ('online', 'unmonitored', 'unreachable', 'lagged', 'unknown'): self.setGraphVal('asterisk_peers_iax2', field, stats.get(field)) if self.hasGraph('asterisk_voip_codecs'): sipstats = self._ami.getVoIPchanStats('sip', self._codecList) or {} iax2stats = self._ami.getVoIPchanStats('iax2', self._codecList) or {} if sipstats or iax2stats: for field in self._codecList: self.setGraphVal('asterisk_voip_codecs', field, sipstats.get(field,0) + iax2stats.get(field, 0)) self.setGraphVal('asterisk_voip_codecs', 'other', sipstats.get('other', 0) + iax2stats.get('other', 0)) if self.hasGraph('asterisk_conferences'): stats = self._ami.getConferenceStats() if stats: self.setGraphVal('asterisk_conferences', 'rooms', stats.get('active_conferences')) self.setGraphVal('asterisk_conferences', 'users', stats.get('conference_users')) if self.hasGraph('asterisk_voicemail'): stats = self._ami.getVoicemailStats() if stats: self.setGraphVal('asterisk_voicemail', 'accounts', stats.get('accounts')) self.setGraphVal('asterisk_voicemail', 'msg_avg', stats.get('avg_messages')) self.setGraphVal('asterisk_voicemail', 'msg_max', stats.get('max_messages')) self.setGraphVal('asterisk_voicemail', 'msg_total', stats.get('total_messages')) if self.hasGraph('asterisk_trunks') and len(self._trunkList) > 0: stats = self._ami.getTrunkStats(self._trunkList) for trunk in self._trunkList: self.setGraphVal('asterisk_trunks', trunk[0], stats.get(trunk[0])) if self._queues is not None: total_answer = 0 total_abandon = 0 for queue in self._queue_list: stats = self._queues[queue] if self.hasGraph('asterisk_queue_len'): self.setGraphVal('asterisk_queue_len', queue, stats.get('queue_len')) if self.hasGraph('asterisk_queue_avg_hold'): self.setGraphVal('asterisk_queue_avg_hold', queue, stats.get('avg_holdtime')) if self.hasGraph('asterisk_queue_avg_talk'): self.setGraphVal('asterisk_queue_avg_talk', queue, stats.get('avg_talktime')) if self.hasGraph('asterisk_queue_calls'): total_abandon += stats.get('calls_abandoned') total_answer += stats.get('calls_completed') if self.hasGraph('asterisk_queue_abandon_pcent'): prev_stats = self._queues_prev.get(queue) if prev_stats is not None: abandon = (stats.get('calls_abandoned', 0) - prev_stats.get('calls_abandoned', 0)) answer = (stats.get('calls_completed', 0) - prev_stats.get('calls_completed', 0)) total = abandon + answer if total > 0: val = 100.0 * float(abandon) / float(total) else: val = 0 self.setGraphVal('asterisk_queue_abandon_pcent', queue, val) if self.hasGraph('asterisk_queue_calls'): self.setGraphVal('asterisk_queue_calls', 'abandon', total_abandon)
self.setGraphVal('asterisk_queue_calls', 'answer', total_answer) if self.hasGraph('asterisk_fax_attempts'): fax_stats = self._ami.getFaxStatsCounters() stats = fax_stats.get('general') if stats is not None: self.setGraphVal('asterisk_fax_attempts', 'send', stats.get('transmit attempts')) self.setGraphVal('asterisk_fax_attempts', 'recv', stats.get('receive attempts')) self.setGraphVal('asterisk_fax_attempts', 'fail', stats.get('failed faxes'))
python
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('asterisk_calls') or self.hasGraph('asterisk_channels'): stats = self._ami.getChannelStats(self._chanList) if self.hasGraph('asterisk_calls') and stats: self.setGraphVal('asterisk_calls', 'active_calls', stats.get('active_calls')) self.setGraphVal('asterisk_calls', 'calls_per_min', stats.get('calls_processed')) if self.hasGraph('asterisk_channels') and stats: for field in self._chanList: self.setGraphVal('asterisk_channels', field, stats.get(field)) if 'dahdi' in self._chanList: self.setGraphVal('asterisk_channels', 'mix', stats.get('mix')) if self.hasGraph('asterisk_peers_sip'): stats = self._ami.getPeerStats('sip') if stats: for field in ('online', 'unmonitored', 'unreachable', 'lagged', 'unknown'): self.setGraphVal('asterisk_peers_sip', field, stats.get(field)) if self.hasGraph('asterisk_peers_iax2'): stats = self._ami.getPeerStats('iax2') if stats: for field in ('online', 'unmonitored', 'unreachable', 'lagged', 'unknown'): self.setGraphVal('asterisk_peers_iax2', field, stats.get(field)) if self.hasGraph('asterisk_voip_codecs'): sipstats = self._ami.getVoIPchanStats('sip', self._codecList) or {} iax2stats = self._ami.getVoIPchanStats('iax2', self._codecList) or {} if sipstats or iax2stats: for field in self._codecList: self.setGraphVal('asterisk_voip_codecs', field, sipstats.get(field,0) + iax2stats.get(field, 0)) self.setGraphVal('asterisk_voip_codecs', 'other', sipstats.get('other', 0) + iax2stats.get('other', 0)) if self.hasGraph('asterisk_conferences'): stats = self._ami.getConferenceStats() if stats: self.setGraphVal('asterisk_conferences', 'rooms', stats.get('active_conferences')) self.setGraphVal('asterisk_conferences', 'users', stats.get('conference_users')) if self.hasGraph('asterisk_voicemail'): stats = self._ami.getVoicemailStats() if stats: self.setGraphVal('asterisk_voicemail', 'accounts', stats.get('accounts')) self.setGraphVal('asterisk_voicemail', 'msg_avg', stats.get('avg_messages')) self.setGraphVal('asterisk_voicemail', 'msg_max', stats.get('max_messages')) self.setGraphVal('asterisk_voicemail', 'msg_total', stats.get('total_messages')) if self.hasGraph('asterisk_trunks') and len(self._trunkList) > 0: stats = self._ami.getTrunkStats(self._trunkList) for trunk in self._trunkList: self.setGraphVal('asterisk_trunks', trunk[0], stats.get(trunk[0])) if self._queues is not None: total_answer = 0 total_abandon = 0 for queue in self._queue_list: stats = self._queues[queue] if self.hasGraph('asterisk_queue_len'): self.setGraphVal('asterisk_queue_len', queue, stats.get('queue_len')) if self.hasGraph('asterisk_queue_avg_hold'): self.setGraphVal('asterisk_queue_avg_hold', queue, stats.get('avg_holdtime')) if self.hasGraph('asterisk_queue_avg_talk'): self.setGraphVal('asterisk_queue_avg_talk', queue, stats.get('avg_talktime')) if self.hasGraph('asterisk_queue_calls'): total_abandon += stats.get('calls_abandoned') total_answer += stats.get('calls_completed') if self.hasGraph('asterisk_queue_abandon_pcent'): prev_stats = self._queues_prev.get(queue) if prev_stats is not None: abandon = (stats.get('calls_abandoned', 0) - prev_stats.get('calls_abandoned', 0)) answer = (stats.get('calls_completed', 0) - prev_stats.get('calls_completed', 0)) total = abandon + answer if total > 0: val = 100.0 * float(abandon) / float(total) else: val = 0 self.setGraphVal('asterisk_queue_abandon_pcent', queue, val) if self.hasGraph('asterisk_queue_calls'): self.setGraphVal('asterisk_queue_calls', 'abandon', total_abandon)
self.setGraphVal('asterisk_queue_calls', 'answer', total_answer) if self.hasGraph('asterisk_fax_attempts'): fax_stats = self._ami.getFaxStatsCounters() stats = fax_stats.get('general') if stats is not None: self.setGraphVal('asterisk_fax_attempts', 'send', stats.get('transmit attempts')) self.setGraphVal('asterisk_fax_attempts', 'recv', stats.get('receive attempts')) self.setGraphVal('asterisk_fax_attempts', 'fail', stats.get('failed faxes'))
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "hasGraph", "(", "'asterisk_calls'", ")", "or", "self", ".", "hasGraph", "(", "'asterisk_channels'", ")", ":", "stats", "=", "self", ".", "_ami", ".", "getChannelStats", "(", "self", ".", "_chanList", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_calls'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'asterisk_calls'", ",", "'active_calls'", ",", "stats", ".", "get", "(", "'active_calls'", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_calls'", ",", "'calls_per_min'", ",", "stats", ".", "get", "(", "'calls_processed'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_channels'", ")", "and", "stats", ":", "for", "field", "in", "self", ".", "_chanList", ":", "self", ".", "setGraphVal", "(", "'asterisk_channels'", ",", "field", ",", "stats", ".", "get", "(", "field", ")", ")", "if", "'dahdi'", "in", "self", ".", "_chanList", ":", "self", ".", "setGraphVal", "(", "'asterisk_channels'", ",", "'mix'", ",", "stats", ".", "get", "(", "'mix'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_peers_sip'", ")", ":", "stats", "=", "self", ".", "_ami", ".", "getPeerStats", "(", "'sip'", ")", "if", "stats", ":", "for", "field", "in", "(", "'online'", ",", "'unmonitored'", ",", "'unreachable'", ",", "'lagged'", ",", "'unknown'", ")", ":", "self", ".", "setGraphVal", "(", "'asterisk_peers_sip'", ",", "field", ",", "stats", ".", "get", "(", "field", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_peers_iax2'", ")", ":", "stats", "=", "self", ".", "_ami", ".", "getPeerStats", "(", "'iax2'", ")", "if", "stats", ":", "for", "field", "in", "(", "'online'", ",", "'unmonitored'", ",", "'unreachable'", ",", "'lagged'", ",", "'unknown'", ")", ":", "self", ".", "setGraphVal", "(", "'asterisk_peers_iax2'", ",", "field", ",", "stats", ".", "get", "(", "field", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_voip_codecs'", ")", ":", "sipstats", "=", "self", ".", "_ami", ".", "getVoIPchanStats", "(", "'sip'", ",", "self", ".", "_codecList", ")", "or", "{", "}", "iax2stats", "=", "self", ".", "_ami", ".", "getVoIPchanStats", "(", "'iax2'", ",", "self", ".", "_codecList", ")", "or", "{", "}", "if", "stats", ":", "for", "field", "in", "self", ".", "_codecList", ":", "self", ".", "setGraphVal", "(", "'asterisk_voip_codecs'", ",", "field", ",", "sipstats", ".", "get", "(", "field", ",", "0", ")", "+", "iax2stats", ".", "get", "(", "field", ",", "0", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_voip_codecs'", ",", "'other'", ",", "sipstats", ".", "get", "(", "'other'", ",", "0", ")", "+", "iax2stats", ".", "get", "(", "'other'", ",", "0", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_conferences'", ")", ":", "stats", "=", "self", ".", "_ami", ".", "getConferenceStats", "(", ")", "if", "stats", ":", "self", ".", "setGraphVal", "(", "'asterisk_conferences'", ",", "'rooms'", ",", "stats", ".", "get", "(", "'active_conferences'", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_conferences'", ",", "'users'", ",", "stats", ".", "get", "(", "'conference_users'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_voicemail'", ")", ":", "stats", "=", "self", ".", "_ami", ".", "getVoicemailStats", "(", ")", "if", "stats", ":", "self", ".", "setGraphVal", "(", "'asterisk_voicemail'", ",", "'accounts'", ",", "stats", ".", "get", "(", "'accounts'", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_voicemail'", ",", "'msg_avg'", ",", "stats", ".", "get", "(", "'avg_messages'", 
")", ")", "self", ".", "setGraphVal", "(", "'asterisk_voicemail'", ",", "'msg_max'", ",", "stats", ".", "get", "(", "'max_messages'", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_voicemail'", ",", "'msg_total'", ",", "stats", ".", "get", "(", "'total_messages'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_trunks'", ")", "and", "len", "(", "self", ".", "_trunkList", ")", ">", "0", ":", "stats", "=", "self", ".", "_ami", ".", "getTrunkStats", "(", "self", ".", "_trunkList", ")", "for", "trunk", "in", "self", ".", "_trunkList", ":", "self", ".", "setGraphVal", "(", "'asterisk_trunks'", ",", "trunk", "[", "0", "]", ",", "stats", ".", "get", "(", "trunk", "[", "0", "]", ")", ")", "if", "self", ".", "_queues", "is", "not", "None", ":", "total_answer", "=", "0", "total_abandon", "=", "0", "for", "queue", "in", "self", ".", "_queue_list", ":", "stats", "=", "self", ".", "_queues", "[", "queue", "]", "if", "self", ".", "hasGraph", "(", "'asterisk_queue_len'", ")", ":", "self", ".", "setGraphVal", "(", "'asterisk_queue_len'", ",", "queue", ",", "stats", ".", "get", "(", "'queue_len'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_queue_avg_hold'", ")", ":", "self", ".", "setGraphVal", "(", "'asterisk_queue_avg_hold'", ",", "queue", ",", "stats", ".", "get", "(", "'avg_holdtime'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_queue_avg_talk'", ")", ":", "self", ".", "setGraphVal", "(", "'asterisk_queue_avg_talk'", ",", "queue", ",", "stats", ".", "get", "(", "'avg_talktime'", ")", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_queue_calls'", ")", ":", "total_abandon", "+=", "stats", ".", "get", "(", "'calls_abandoned'", ")", "total_answer", "+=", "stats", ".", "get", "(", "'calls_completed'", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_queue_abandon_pcent'", ")", ":", "prev_stats", "=", "self", ".", "_queues_prev", ".", "get", "(", "queue", ")", "if", "prev_stats", "is", "not", "None", ":", "abandon", "=", "(", "stats", ".", "get", "(", "'calls_abandoned'", ",", "0", ")", "-", "prev_stats", ".", "get", "(", "'calls_abandoned'", ",", "0", ")", ")", "answer", "=", "(", "stats", ".", "get", "(", "'calls_completed'", ",", "0", ")", "-", "prev_stats", ".", "get", "(", "'calls_completed'", ",", "0", ")", ")", "total", "=", "abandon", "+", "answer", "if", "total", ">", "0", ":", "val", "=", "100.0", "*", "float", "(", "abandon", ")", "/", "float", "(", "total", ")", "else", ":", "val", "=", "0", "self", ".", "setGraphVal", "(", "'asterisk_queue_abandon_pcent'", ",", "queue", ",", "val", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_queue_calls'", ")", ":", "self", ".", "setGraphVal", "(", "'asterisk_queue_calls'", ",", "'abandon'", ",", "total_abandon", ")", "self", ".", "setGraphVal", "(", "'asterisk_queue_calls'", ",", "'answer'", ",", "total_answer", ")", "if", "self", ".", "hasGraph", "(", "'asterisk_fax_attempts'", ")", ":", "fax_stats", "=", "self", ".", "_ami", ".", "getFaxStatsCounters", "(", ")", "stats", "=", "fax_stats", ".", "get", "(", "'general'", ")", "if", "stats", "is", "not", "None", ":", "self", ".", "setGraphVal", "(", "'asterisk_fax_attempts'", ",", "'send'", ",", "stats", ".", "get", "(", "'transmit attempts'", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_fax_attempts'", ",", "'recv'", ",", "stats", ".", "get", "(", "'receive attempts'", ")", ")", "self", ".", "setGraphVal", "(", "'asterisk_fax_attempts'", ",", "'fail'", ",", "stats", ".", "get", "(", "'failed faxes'", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/asteriskstats.py#L309-L426
aouyar/PyMunin
pymunin/plugins/phpfpmstats.py
MuninPHPfpmPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = fpminfo.getStats() if self.hasGraph('php_fpm_connections') and stats: self.setGraphVal('php_fpm_connections', 'conn', stats['accepted conn']) if self.hasGraph('php_fpm_processes') and stats: self.setGraphVal('php_fpm_processes', 'active', stats['active processes']) self.setGraphVal('php_fpm_processes', 'idle', stats['idle processes']) self.setGraphVal('php_fpm_processes', 'total', stats['total processes'])
python
def retrieveVals(self): """Retrieve values for graphs.""" fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = fpminfo.getStats() if self.hasGraph('php_fpm_connections') and stats: self.setGraphVal('php_fpm_connections', 'conn', stats['accepted conn']) if self.hasGraph('php_fpm_processes') and stats: self.setGraphVal('php_fpm_processes', 'active', stats['active processes']) self.setGraphVal('php_fpm_processes', 'idle', stats['idle processes']) self.setGraphVal('php_fpm_processes', 'total', stats['total processes'])
[ "def", "retrieveVals", "(", "self", ")", ":", "fpminfo", "=", "PHPfpmInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "stats", "=", "fpminfo", ".", "getStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'php_fpm_connections'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_fpm_connections'", ",", "'conn'", ",", "stats", "[", "'accepted conn'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_fpm_processes'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_fpm_processes'", ",", "'active'", ",", "stats", "[", "'active processes'", "]", ")", "self", ".", "setGraphVal", "(", "'php_fpm_processes'", ",", "'idle'", ",", "stats", "[", "'idle processes'", "]", ")", "self", ".", "setGraphVal", "(", "'php_fpm_processes'", ",", "'total'", ",", "stats", "[", "'total processes'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpfpmstats.py#L111-L125
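The plugin above pulls its counters from the php-fpm status page. As a minimal sketch of the parsing step this implies, the snippet below turns a plain-text status payload into the same dict keys the plugin reads ('accepted conn', 'active processes', ...); the sample payload and the parse_fpm_status helper are illustrative assumptions, not part of PyMunin.

# Hypothetical sketch: parse a plain-text php-fpm status payload into the
# dict keys used by the plugin. The sample payload is made up, not captured
# from a live server.
sample = """pool:                 www
accepted conn:        20540
active processes:     2
idle processes:       8
total processes:      10"""

def parse_fpm_status(text):
    stats = {}
    for line in text.splitlines():
        key, _, value = line.partition(':')
        value = value.strip()
        # keep counters as ints, anything else as a plain string
        stats[key.strip()] = int(value) if value.isdigit() else value
    return stats

stats = parse_fpm_status(sample)
print(stats['accepted conn'], stats['active processes'])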
aouyar/PyMunin
pymunin/plugins/phpfpmstats.py
MuninPHPfpmPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return fpminfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return fpminfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "fpminfo", "=", "PHPfpmInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "return", "fpminfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpfpmstats.py#L127-L135
aouyar/PyMunin
pysysinfo/redisdb.py
RedisInfo.ping
def ping(self): """Ping Redis Server and return Round-Trip-Time in seconds. @return: Round-trip-time in seconds as float. """ start = time.time() self._conn.ping() return (time.time() - start)
python
def ping(self): """Ping Redis Server and return Round-Trip-Time in seconds. @return: Round-trip-time in seconds as float. """ start = time.time() self._conn.ping() return (time.time() - start)
[ "def", "ping", "(", "self", ")", ":", "start", "=", "time", ".", "time", "(", ")", "self", ".", "_conn", ".", "ping", "(", ")", "return", "(", "time", ".", "time", "(", ")", "-", "start", ")" ]
Ping Redis Server and return Round-Trip-Time in seconds. @return: Round-trip-time in seconds as float.
[ "Ping", "Redis", "Server", "and", "return", "Round", "-", "Trip", "-", "Time", "in", "seconds", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/redisdb.py#L49-L57
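A minimal sketch of the same round-trip-time measurement using the redis-py client directly; it assumes the third-party redis package is installed and a server is listening on localhost:6379.

import time
import redis

conn = redis.Redis(host='localhost', port=6379)
start = time.time()
conn.ping()                  # blocks until the server answers PONG
rtt = time.time() - start    # round-trip time in seconds, as a float
print('RTT: %.6f s' % rtt)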
ContextLab/quail
quail/helpers.py
list2pd
def list2pd(all_data, subjindex=None, listindex=None): """ Makes multi-indexed dataframe of subject data Parameters ---------- all_data : list of lists of strings strings are either all presented or all recalled items, in the order of presentation or recall *should also work for presented / recalled ints and floats, if desired Returns ---------- subs_list_of_dfs : multi-indexed dataframe dataframe of subject data (presented or recalled words/items), indexed by subject and list number cell populated by the term presented or recalled in the position indicated by the column number """ # set default index if it is not defined # max_nlists = max(map(lambda x: len(x), all_data)) listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex subjindex = [idx for idx,subj in enumerate(all_data)] if not subjindex else subjindex def make_multi_index(listindex, sub_num): return pd.MultiIndex.from_tuples([(sub_num,lst) for lst in listindex], names = ['Subject', 'List']) listindex = list(listindex) subjindex = list(subjindex) subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num])) for sub_num,sub_data in enumerate(all_data)] return pd.concat(subs_list_of_dfs)
python
def list2pd(all_data, subjindex=None, listindex=None): """ Makes multi-indexed dataframe of subject data Parameters ---------- all_data : list of lists of strings strings are either all presented or all recalled items, in the order of presentation or recall *should also work for presented / recalled ints and floats, if desired Returns ---------- subs_list_of_dfs : multi-indexed dataframe dataframe of subject data (presented or recalled words/items), indexed by subject and list number cell populated by the term presented or recalled in the position indicated by the column number """ # set default index if it is not defined # max_nlists = max(map(lambda x: len(x), all_data)) listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex subjindex = [idx for idx,subj in enumerate(all_data)] if not subjindex else subjindex def make_multi_index(listindex, sub_num): return pd.MultiIndex.from_tuples([(sub_num,lst) for lst in listindex], names = ['Subject', 'List']) listindex = list(listindex) subjindex = list(subjindex) subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num])) for sub_num,sub_data in enumerate(all_data)] return pd.concat(subs_list_of_dfs)
[ "def", "list2pd", "(", "all_data", ",", "subjindex", "=", "None", ",", "listindex", "=", "None", ")", ":", "# set default index if it is not defined", "# max_nlists = max(map(lambda x: len(x), all_data))", "listindex", "=", "[", "[", "idx", "for", "idx", "in", "range", "(", "len", "(", "sub", ")", ")", "]", "for", "sub", "in", "all_data", "]", "if", "not", "listindex", "else", "listindex", "subjindex", "=", "[", "idx", "for", "idx", ",", "subj", "in", "enumerate", "(", "all_data", ")", "]", "if", "not", "subjindex", "else", "subjindex", "def", "make_multi_index", "(", "listindex", ",", "sub_num", ")", ":", "return", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "[", "(", "sub_num", ",", "lst", ")", "for", "lst", "in", "listindex", "]", ",", "names", "=", "[", "'Subject'", ",", "'List'", "]", ")", "listindex", "=", "list", "(", "listindex", ")", "subjindex", "=", "list", "(", "subjindex", ")", "subs_list_of_dfs", "=", "[", "pd", ".", "DataFrame", "(", "sub_data", ",", "index", "=", "make_multi_index", "(", "listindex", "[", "sub_num", "]", ",", "subjindex", "[", "sub_num", "]", ")", ")", "for", "sub_num", ",", "sub_data", "in", "enumerate", "(", "all_data", ")", "]", "return", "pd", ".", "concat", "(", "subs_list_of_dfs", ")" ]
Makes a multi-indexed dataframe of subject data Parameters ---------- all_data : list of lists of strings strings are either all presented or all recalled items, in the order of presentation or recall *should also work for presented / recalled ints and floats, if desired Returns ---------- subs_list_of_dfs : multi-indexed dataframe dataframe of subject data (presented or recalled words/items), indexed by subject and list number; each cell is populated by the term presented or recalled at the position indicated by the column number
[ "Makes", "multi", "-", "indexed", "dataframe", "of", "subject", "data" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L11-L43
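A standalone sketch of the (Subject, List) MultiIndex that list2pd builds, using only pandas; the nested-list input mirrors the all_data shape described in the docstring.

import pandas as pd

all_data = [[['cat', 'dog'], ['sun', 'moon']],   # subject 0, lists 0 and 1
            [['red', 'blue'], ['up', 'down']]]   # subject 1, lists 0 and 1

frames = []
for sub_num, sub_data in enumerate(all_data):
    # one (Subject, List) tuple per encoding list, as in make_multi_index
    index = pd.MultiIndex.from_tuples(
        [(sub_num, lst) for lst in range(len(sub_data))],
        names=['Subject', 'List'])
    frames.append(pd.DataFrame(sub_data, index=index))

df = pd.concat(frames)
print(df.loc[(1, 0)])   # items presented to subject 1 in list 0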
ContextLab/quail
quail/helpers.py
recmat2egg
def recmat2egg(recmat, list_length=None): """ Creates egg data object from zero-indexed recall matrix Parameters ---------- recmat : list of lists (subs) of lists (encoding lists) of ints or 2D numpy array recall matrix representing serial positions of freely recalled words \ e.g. [[[16, 15, 0, 2, 3, None, None...], [16, 4, 5, 6, 1, None, None...]]] list_length : int The length of each list (e.g. 16) Returns ---------- egg : Egg data object egg data object computed from the recall matrix """ from .egg import Egg as Egg pres = [[[str(word) for word in list(range(0,list_length))] for reclist in recsub] for recsub in recmat] rec = [[[str(word) for word in reclist if word is not None] for reclist in recsub] for recsub in recmat] return Egg(pres=pres,rec=rec)
python
def recmat2egg(recmat, list_length=None): """ Creates egg data object from zero-indexed recall matrix Parameters ---------- recmat : list of lists (subs) of lists (encoding lists) of ints or 2D numpy array recall matrix representing serial positions of freely recalled words \ e.g. [[[16, 15, 0, 2, 3, None, None...], [16, 4, 5, 6, 1, None, None...]]] list_length : int The length of each list (e.g. 16) Returns ---------- egg : Egg data object egg data object computed from the recall matrix """ from .egg import Egg as Egg pres = [[[str(word) for word in list(range(0,list_length))] for reclist in recsub] for recsub in recmat] rec = [[[str(word) for word in reclist if word is not None] for reclist in recsub] for recsub in recmat] return Egg(pres=pres,rec=rec)
[ "def", "recmat2egg", "(", "recmat", ",", "list_length", "=", "None", ")", ":", "from", ".", "egg", "import", "Egg", "as", "Egg", "pres", "=", "[", "[", "[", "str", "(", "word", ")", "for", "word", "in", "list", "(", "range", "(", "0", ",", "list_length", ")", ")", "]", "for", "reclist", "in", "recsub", "]", "for", "recsub", "in", "recmat", "]", "rec", "=", "[", "[", "[", "str", "(", "word", ")", "for", "word", "in", "reclist", "if", "word", "is", "not", "None", "]", "for", "reclist", "in", "recsub", "]", "for", "recsub", "in", "recmat", "]", "return", "Egg", "(", "pres", "=", "pres", ",", "rec", "=", "rec", ")" ]
Creates egg data object from zero-indexed recall matrix Parameters ---------- recmat : list of lists (subs) of lists (encoding lists) of ints or 2D numpy array recall matrix representing serial positions of freely recalled words \ e.g. [[[16, 15, 0, 2, 3, None, None...], [16, 4, 5, 6, 1, None, None...]]] list_length : int The length of each list (e.g. 16) Returns ---------- egg : Egg data object egg data object computed from the recall matrix
[ "Creates", "egg", "data", "object", "from", "zero", "-", "indexed", "recall", "matrix" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L72-L96
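An illustrative call, assuming quail is installed (recmat2egg lives in quail/helpers.py per the record above); recmat holds zero-indexed serial positions of recalled items, padded with None where nothing was recalled.

from quail.helpers import recmat2egg

recmat = [[[3, 1, 0, None],      # subject 0, list 0: recalled items 3, 1, 0
           [2, 0, None, None]]]  # subject 0, list 1: recalled items 2, 0
egg = recmat2egg(recmat, list_length=4)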
ContextLab/quail
quail/helpers.py
default_dist_funcs
def default_dist_funcs(dist_funcs, feature_example): """ Fills in default distance metrics for fingerprint analyses """ if dist_funcs is None: dist_funcs = dict() for key in feature_example: if key in dist_funcs: pass if key == 'item': pass elif isinstance(feature_example[key], (six.string_types, six.binary_type)): dist_funcs[key] = 'match' elif isinstance(feature_example[key], (int, np.integer, float)) or all([isinstance(i, (int, np.integer, float)) for i in feature_example[key]]): dist_funcs[key] = 'euclidean' return dist_funcs
python
def default_dist_funcs(dist_funcs, feature_example): """ Fills in default distance metrics for fingerprint analyses """ if dist_funcs is None: dist_funcs = dict() for key in feature_example: if key in dist_funcs: pass if key == 'item': pass elif isinstance(feature_example[key], (six.string_types, six.binary_type)): dist_funcs[key] = 'match' elif isinstance(feature_example[key], (int, np.integer, float)) or all([isinstance(i, (int, np.integer, float)) for i in feature_example[key]]): dist_funcs[key] = 'euclidean' return dist_funcs
[ "def", "default_dist_funcs", "(", "dist_funcs", ",", "feature_example", ")", ":", "if", "dist_funcs", "is", "None", ":", "dist_funcs", "=", "dict", "(", ")", "for", "key", "in", "feature_example", ":", "if", "key", "in", "dist_funcs", ":", "pass", "if", "key", "==", "'item'", ":", "pass", "elif", "isinstance", "(", "feature_example", "[", "key", "]", ",", "(", "six", ".", "string_types", ",", "six", ".", "binary_type", ")", ")", ":", "dist_funcs", "[", "key", "]", "=", "'match'", "elif", "isinstance", "(", "feature_example", "[", "key", "]", ",", "(", "int", ",", "np", ".", "integer", ",", "float", ")", ")", "or", "all", "(", "[", "isinstance", "(", "i", ",", "(", "int", ",", "np", ".", "integer", ",", "float", ")", ")", "for", "i", "in", "feature_example", "[", "key", "]", "]", ")", ":", "dist_funcs", "[", "key", "]", "=", "'euclidean'", "return", "dist_funcs" ]
Fills in default distance metrics for fingerprint analyses
[ "Fills", "in", "default", "distance", "metrics", "for", "fingerprint", "analyses" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L98-L116
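The defaulting rule boils down to: string-valued features get the 'match' metric, numeric features (or sequences of numbers) get 'euclidean', and the 'item' key is skipped. A simplified stdlib-only re-implementation (the original also accepts bytes via six and starts from any pre-seeded dist_funcs entries):

import numbers

feature_example = {'item': 'cat', 'category': 'animal',
                   'length': 3, 'embedding': [0.1, 0.2, 0.3]}
dist_funcs = {}
for key, val in feature_example.items():
    if key == 'item':
        continue                       # the item itself never gets a metric
    if isinstance(val, str):
        dist_funcs[key] = 'match'      # exact-match distance for labels
    elif isinstance(val, numbers.Number) or all(
            isinstance(i, numbers.Number) for i in val):
        dist_funcs[key] = 'euclidean'  # euclidean distance for numeric features
print(dist_funcs)  # {'category': 'match', 'length': 'euclidean', 'embedding': 'euclidean'}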
ContextLab/quail
quail/helpers.py
stack_eggs
def stack_eggs(eggs, meta='concatenate'): ''' Takes a list of eggs, stacks them and reindexes the subject number Parameters ---------- eggs : list of Egg data objects A list of Eggs that you want to combine meta : string Determines how the meta data of each Egg combines. Default is 'concatenate' 'concatenate' concatenates keys in meta data dictionary shared between eggs, and copies non-overlapping keys 'separate' keeps the Eggs' meta data dictionaries separate, with each as a list index in the stacked meta data Returns ---------- new_egg : Egg data object A mega egg comprised of the input eggs stacked together ''' from .egg import Egg pres = [egg.pres.loc[sub,:].values.tolist() for egg in eggs for sub in egg.pres.index.levels[0].values.tolist()] rec = [egg.rec.loc[sub,:].values.tolist() for egg in eggs for sub in egg.rec.index.levels[0].values.tolist()] if meta is 'concatenate': new_meta = {} for egg in eggs: for key in egg.meta: if key in new_meta: new_meta[key] = list(new_meta[key]) new_meta[key].extend(egg.meta.get(key)) else: new_meta[key] = egg.meta.get(key) elif meta is 'separate': new_meta = list(egg.meta for egg in eggs) return Egg(pres=pres, rec=rec, meta=new_meta)
python
def stack_eggs(eggs, meta='concatenate'): ''' Takes a list of eggs, stacks them and reindexes the subject number Parameters ---------- eggs : list of Egg data objects A list of Eggs that you want to combine meta : string Determines how the meta data of each Egg combines. Default is 'concatenate' 'concatenate' concatenates keys in meta data dictionary shared between eggs, and copies non-overlapping keys 'separate' keeps the Eggs' meta data dictionaries separate, with each as a list index in the stacked meta data Returns ---------- new_egg : Egg data object A mega egg comprised of the input eggs stacked together ''' from .egg import Egg pres = [egg.pres.loc[sub,:].values.tolist() for egg in eggs for sub in egg.pres.index.levels[0].values.tolist()] rec = [egg.rec.loc[sub,:].values.tolist() for egg in eggs for sub in egg.rec.index.levels[0].values.tolist()] if meta is 'concatenate': new_meta = {} for egg in eggs: for key in egg.meta: if key in new_meta: new_meta[key] = list(new_meta[key]) new_meta[key].extend(egg.meta.get(key)) else: new_meta[key] = egg.meta.get(key) elif meta is 'separate': new_meta = list(egg.meta for egg in eggs) return Egg(pres=pres, rec=rec, meta=new_meta)
[ "def", "stack_eggs", "(", "eggs", ",", "meta", "=", "'concatenate'", ")", ":", "from", ".", "egg", "import", "Egg", "pres", "=", "[", "egg", ".", "pres", ".", "loc", "[", "sub", ",", ":", "]", ".", "values", ".", "tolist", "(", ")", "for", "egg", "in", "eggs", "for", "sub", "in", "egg", ".", "pres", ".", "index", ".", "levels", "[", "0", "]", ".", "values", ".", "tolist", "(", ")", "]", "rec", "=", "[", "egg", ".", "rec", ".", "loc", "[", "sub", ",", ":", "]", ".", "values", ".", "tolist", "(", ")", "for", "egg", "in", "eggs", "for", "sub", "in", "egg", ".", "rec", ".", "index", ".", "levels", "[", "0", "]", ".", "values", ".", "tolist", "(", ")", "]", "if", "meta", "is", "'concatenate'", ":", "new_meta", "=", "{", "}", "for", "egg", "in", "eggs", ":", "for", "key", "in", "egg", ".", "meta", ":", "if", "key", "in", "new_meta", ":", "new_meta", "[", "key", "]", "=", "list", "(", "new_meta", "[", "key", "]", ")", "new_meta", "[", "key", "]", ".", "extend", "(", "egg", ".", "meta", ".", "get", "(", "key", ")", ")", "else", ":", "new_meta", "[", "key", "]", "=", "egg", ".", "meta", ".", "get", "(", "key", ")", "elif", "meta", "is", "'separate'", ":", "new_meta", "=", "list", "(", "egg", ".", "meta", "for", "egg", "in", "eggs", ")", "return", "Egg", "(", "pres", "=", "pres", ",", "rec", "=", "rec", ",", "meta", "=", "new_meta", ")" ]
Takes a list of eggs, stacks them and reindexes the subject number Parameters ---------- eggs : list of Egg data objects A list of Eggs that you want to combine meta : string Determines how the meta data of each Egg combines. Default is 'concatenate' 'concatenate' concatenates keys in meta data dictionary shared between eggs, and copies non-overlapping keys 'separate' keeps the Eggs' meta data dictionaries separate, with each as a list index in the stacked meta data Returns ---------- new_egg : Egg data object A mega egg comprised of the input eggs stacked together
[ "Takes", "a", "list", "of", "eggs", "stacks", "them", "and", "reindexes", "the", "subject", "number" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L118-L156
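A pure-dict sketch of the meta='concatenate' merge rule: keys shared between eggs are extended list-wise, non-overlapping keys are copied over. The sample metadata is made up for illustration.

metas = [{'task': ['free recall'], 'site': 'A'},
         {'task': ['free recall'], 'n_subjects': 20}]
new_meta = {}
for meta in metas:
    for key, value in meta.items():
        if key in new_meta:
            new_meta[key] = list(new_meta[key])  # copy before mutating
            new_meta[key].extend(value)
        else:
            new_meta[key] = value
print(new_meta)  # {'task': ['free recall', 'free recall'], 'site': 'A', 'n_subjects': 20}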
ContextLab/quail
quail/helpers.py
crack_egg
def crack_egg(egg, subjects=None, lists=None): ''' Takes an egg and returns a subset of the subjects or lists Parameters ---------- egg : Egg data object Egg that you want to crack subjects : list List of subject idxs lists : list List of lists idxs Returns ---------- new_egg : Egg data object A sliced egg, good on a salad ''' from .egg import Egg if hasattr(egg, 'features'): all_have_features = egg.features is not None else: all_have_features=False opts = {} if subjects is None: subjects = egg.pres.index.levels[0].values.tolist() elif type(subjects) is not list: subjects = [subjects] if lists is None: lists = egg.pres.index.levels[1].values.tolist() elif type(lists) is not list: lists = [lists] idx = pd.IndexSlice pres = egg.pres.loc[idx[subjects,lists],egg.pres.columns] rec = egg.rec.loc[idx[subjects,lists],egg.rec.columns] pres = [pres.loc[sub,:].values.tolist() for sub in subjects] rec = [rec.loc[sub,:].values.tolist() for sub in subjects] if all_have_features: features = egg.features.loc[idx[subjects,lists],egg.features.columns] opts['features'] = [features.loc[sub,:].values.tolist() for sub in subjects] return Egg(pres=pres, rec=rec, **opts)
python
def crack_egg(egg, subjects=None, lists=None): ''' Takes an egg and returns a subset of the subjects or lists Parameters ---------- egg : Egg data object Egg that you want to crack subjects : list List of subject idxs lists : list List of lists idxs Returns ---------- new_egg : Egg data object A sliced egg, good on a salad ''' from .egg import Egg if hasattr(egg, 'features'): all_have_features = egg.features is not None else: all_have_features=False opts = {} if subjects is None: subjects = egg.pres.index.levels[0].values.tolist() elif type(subjects) is not list: subjects = [subjects] if lists is None: lists = egg.pres.index.levels[1].values.tolist() elif type(lists) is not list: lists = [lists] idx = pd.IndexSlice pres = egg.pres.loc[idx[subjects,lists],egg.pres.columns] rec = egg.rec.loc[idx[subjects,lists],egg.rec.columns] pres = [pres.loc[sub,:].values.tolist() for sub in subjects] rec = [rec.loc[sub,:].values.tolist() for sub in subjects] if all_have_features: features = egg.features.loc[idx[subjects,lists],egg.features.columns] opts['features'] = [features.loc[sub,:].values.tolist() for sub in subjects] return Egg(pres=pres, rec=rec, **opts)
[ "def", "crack_egg", "(", "egg", ",", "subjects", "=", "None", ",", "lists", "=", "None", ")", ":", "from", ".", "egg", "import", "Egg", "if", "hasattr", "(", "egg", ",", "'features'", ")", ":", "all_have_features", "=", "egg", ".", "features", "is", "not", "None", "else", ":", "all_have_features", "=", "False", "opts", "=", "{", "}", "if", "subjects", "is", "None", ":", "subjects", "=", "egg", ".", "pres", ".", "index", ".", "levels", "[", "0", "]", ".", "values", ".", "tolist", "(", ")", "elif", "type", "(", "subjects", ")", "is", "not", "list", ":", "subjects", "=", "[", "subjects", "]", "if", "lists", "is", "None", ":", "lists", "=", "egg", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", ".", "tolist", "(", ")", "elif", "type", "(", "lists", ")", "is", "not", "list", ":", "lists", "=", "[", "lists", "]", "idx", "=", "pd", ".", "IndexSlice", "pres", "=", "egg", ".", "pres", ".", "loc", "[", "idx", "[", "subjects", ",", "lists", "]", ",", "egg", ".", "pres", ".", "columns", "]", "rec", "=", "egg", ".", "rec", ".", "loc", "[", "idx", "[", "subjects", ",", "lists", "]", ",", "egg", ".", "rec", ".", "columns", "]", "pres", "=", "[", "pres", ".", "loc", "[", "sub", ",", ":", "]", ".", "values", ".", "tolist", "(", ")", "for", "sub", "in", "subjects", "]", "rec", "=", "[", "rec", ".", "loc", "[", "sub", ",", ":", "]", ".", "values", ".", "tolist", "(", ")", "for", "sub", "in", "subjects", "]", "if", "all_have_features", ":", "features", "=", "egg", ".", "features", ".", "loc", "[", "idx", "[", "subjects", ",", "lists", "]", ",", "egg", ".", "features", ".", "columns", "]", "opts", "[", "'features'", "]", "=", "[", "features", ".", "loc", "[", "sub", ",", ":", "]", ".", "values", ".", "tolist", "(", ")", "for", "sub", "in", "subjects", "]", "return", "Egg", "(", "pres", "=", "pres", ",", "rec", "=", "rec", ",", "*", "*", "opts", ")" ]
Takes an egg and returns a subset of the subjects or lists Parameters ---------- egg : Egg data object Egg that you want to crack subjects : list List of subject idxs lists : list List of lists idxs Returns ---------- new_egg : Egg data object A sliced egg, good on a salad
[ "Takes", "an", "egg", "and", "returns", "a", "subset", "of", "the", "subjects", "or", "lists" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L158-L208
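An illustrative slice, assuming quail is installed; Egg(pres=..., rec=...) is the constructor form used elsewhere in these records. The cracked egg keeps only the requested subject/list combinations.

from quail import Egg
from quail.helpers import crack_egg

pres = [[['cat', 'dog'], ['sun', 'moon']],   # subject 0
        [['red', 'blue'], ['up', 'down']]]   # subject 1
rec = [[['dog'], ['moon', 'sun']],
       [['blue'], ['down']]]
egg = Egg(pres=pres, rec=rec)
sub0_list0 = crack_egg(egg, subjects=0, lists=0)  # scalars are wrapped into lists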
ContextLab/quail
quail/helpers.py
df2list
def df2list(df): """ Convert a MultiIndex df to list Parameters ---------- df : pandas.DataFrame A MultiIndex DataFrame where the first level is subjects and the second level is lists (e.g. egg.pres) Returns ---------- lst : a list of lists of lists of values The input df reformatted as a list """ subjects = df.index.levels[0].values.tolist() lists = df.index.levels[1].values.tolist() idx = pd.IndexSlice df = df.loc[idx[subjects,lists],df.columns] lst = [df.loc[sub,:].values.tolist() for sub in subjects] return lst
python
def df2list(df): """ Convert a MultiIndex df to list Parameters ---------- df : pandas.DataFrame A MultiIndex DataFrame where the first level is subjects and the second level is lists (e.g. egg.pres) Returns ---------- lst : a list of lists of lists of values The input df reformatted as a list """ subjects = df.index.levels[0].values.tolist() lists = df.index.levels[1].values.tolist() idx = pd.IndexSlice df = df.loc[idx[subjects,lists],df.columns] lst = [df.loc[sub,:].values.tolist() for sub in subjects] return lst
[ "def", "df2list", "(", "df", ")", ":", "subjects", "=", "df", ".", "index", ".", "levels", "[", "0", "]", ".", "values", ".", "tolist", "(", ")", "lists", "=", "df", ".", "index", ".", "levels", "[", "1", "]", ".", "values", ".", "tolist", "(", ")", "idx", "=", "pd", ".", "IndexSlice", "df", "=", "df", ".", "loc", "[", "idx", "[", "subjects", ",", "lists", "]", ",", "df", ".", "columns", "]", "lst", "=", "[", "df", ".", "loc", "[", "sub", ",", ":", "]", ".", "values", ".", "tolist", "(", ")", "for", "sub", "in", "subjects", "]", "return", "lst" ]
Convert a MultiIndex df to list Parameters ---------- df : pandas.DataFrame A MultiIndex DataFrame where the first level is subjects and the second level is lists (e.g. egg.pres) Returns ---------- lst : a list of lists of lists of values The input df reformatted as a list
[ "Convert", "a", "MultiIndex", "df", "to", "list" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L210-L233
ContextLab/quail
quail/helpers.py
fill_missing
def fill_missing(x): """ Fills in missing lists (assumes end lists are missing) """ # find subject with max number of lists maxlen = max([len(xi) for xi in x]) subs = [] for sub in x: if len(sub)<maxlen: for i in range(maxlen-len(sub)): sub.append([]) new_sub = sub else: new_sub = sub subs.append(new_sub) return subs
python
def fill_missing(x): """ Fills in missing lists (assumes end lists are missing) """ # find subject with max number of lists maxlen = max([len(xi) for xi in x]) subs = [] for sub in x: if len(sub)<maxlen: for i in range(maxlen-len(sub)): sub.append([]) new_sub = sub else: new_sub = sub subs.append(new_sub) return subs
[ "def", "fill_missing", "(", "x", ")", ":", "# find subject with max number of lists", "maxlen", "=", "max", "(", "[", "len", "(", "xi", ")", "for", "xi", "in", "x", "]", ")", "subs", "=", "[", "]", "for", "sub", "in", "x", ":", "if", "len", "(", "sub", ")", "<", "maxlen", ":", "for", "i", "in", "range", "(", "maxlen", "-", "len", "(", "sub", ")", ")", ":", "sub", ".", "append", "(", "[", "]", ")", "new_sub", "=", "sub", "else", ":", "new_sub", "=", "sub", "subs", ".", "append", "(", "new_sub", ")", "return", "subs" ]
Fills in missing lists (assumes end lists are missing)
[ "Fills", "in", "missing", "lists", "(", "assumes", "end", "lists", "are", "missing", ")" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L235-L253
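The padding logic in miniature, with no quail dependency: every subject is padded out to the longest subject's list count with empty lists (the original appends in place; this variant builds new lists).

x = [[['a', 'b'], ['c']],   # subject with 2 lists
     [['d', 'e']]]          # subject with 1 list -> gets one [] appended
maxlen = max(len(sub) for sub in x)
padded = [sub + [[]] * (maxlen - len(sub)) for sub in x]
print(padded)  # [[['a', 'b'], ['c']], [['d', 'e'], []]]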
ContextLab/quail
quail/helpers.py
parse_egg
def parse_egg(egg): """Parses an egg and returns fields""" pres_list = egg.get_pres_items().values[0] rec_list = egg.get_rec_items().values[0] feature_list = egg.get_pres_features().values[0] dist_funcs = egg.dist_funcs return pres_list, rec_list, feature_list, dist_funcs
python
def parse_egg(egg): """Parses an egg and returns fields""" pres_list = egg.get_pres_items().values[0] rec_list = egg.get_rec_items().values[0] feature_list = egg.get_pres_features().values[0] dist_funcs = egg.dist_funcs return pres_list, rec_list, feature_list, dist_funcs
[ "def", "parse_egg", "(", "egg", ")", ":", "pres_list", "=", "egg", ".", "get_pres_items", "(", ")", ".", "values", "[", "0", "]", "rec_list", "=", "egg", ".", "get_rec_items", "(", ")", ".", "values", "[", "0", "]", "feature_list", "=", "egg", ".", "get_pres_features", "(", ")", ".", "values", "[", "0", "]", "dist_funcs", "=", "egg", ".", "dist_funcs", "return", "pres_list", ",", "rec_list", ",", "feature_list", ",", "dist_funcs" ]
Parses an egg and returns fields
[ "Parses", "an", "egg", "and", "returns", "fields" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L255-L261
ContextLab/quail
quail/helpers.py
merge_pres_feats
def merge_pres_feats(pres, features): """ Helper function to merge pres and features to support legacy features argument """ sub = [] for psub, fsub in zip(pres, features): exp = [] for pexp, fexp in zip(psub, fsub): lst = [] for p, f in zip(pexp, fexp): p.update(f) lst.append(p) exp.append(lst) sub.append(exp) return sub
python
def merge_pres_feats(pres, features): """ Helper function to merge pres and features to support legacy features argument """ sub = [] for psub, fsub in zip(pres, features): exp = [] for pexp, fexp in zip(psub, fsub): lst = [] for p, f in zip(pexp, fexp): p.update(f) lst.append(p) exp.append(lst) sub.append(exp) return sub
[ "def", "merge_pres_feats", "(", "pres", ",", "features", ")", ":", "sub", "=", "[", "]", "for", "psub", ",", "fsub", "in", "zip", "(", "pres", ",", "features", ")", ":", "exp", "=", "[", "]", "for", "pexp", ",", "fexp", "in", "zip", "(", "psub", ",", "fsub", ")", ":", "lst", "=", "[", "]", "for", "p", ",", "f", "in", "zip", "(", "pexp", ",", "fexp", ")", ":", "p", ".", "update", "(", "f", ")", "lst", ".", "append", "(", "p", ")", "exp", ".", "append", "(", "lst", ")", "sub", ".", "append", "(", "exp", ")", "return", "sub" ]
Helper function to merge pres and features to support legacy features argument
[ "Helper", "function", "to", "merge", "pres", "and", "features", "to", "support", "legacy", "features", "argument" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L263-L278
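A pure-dict demo of the legacy merge: for every item, the feature dict is folded into the presentation dict. The original mutates each presentation dict with p.update(f); the variant below uses dict(p, **f) to keep the inputs intact.

pres = [[[{'item': 'cat'}, {'item': 'dog'}]]]        # one subject, one list
features = [[[{'size': 'small'}, {'size': 'big'}]]]
merged = []
for psub, fsub in zip(pres, features):
    exp = []
    for pexp, fexp in zip(psub, fsub):
        exp.append([dict(p, **f) for p, f in zip(pexp, fexp)])
    merged.append(exp)
print(merged[0][0][0])  # {'item': 'cat', 'size': 'small'}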
ContextLab/quail
quail/helpers.py
r2z
def r2z(r): """ Function that calculates the Fisher z-transformation Parameters ---------- r : int or ndarray Correlation value Returns ---------- result : int or ndarray Fishers z transformed correlation value """ with np.errstate(invalid='ignore', divide='ignore'): return 0.5 * (np.log(1 + r) - np.log(1 - r))
python
def r2z(r): """ Function that calculates the Fisher z-transformation Parameters ---------- r : int or ndarray Correlation value Returns ---------- result : int or ndarray Fishers z transformed correlation value """ with np.errstate(invalid='ignore', divide='ignore'): return 0.5 * (np.log(1 + r) - np.log(1 - r))
[ "def", "r2z", "(", "r", ")", ":", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ",", "divide", "=", "'ignore'", ")", ":", "return", "0.5", "*", "(", "np", ".", "log", "(", "1", "+", "r", ")", "-", "np", ".", "log", "(", "1", "-", "r", ")", ")" ]
Function that calculates the Fisher z-transformation Parameters ---------- r : int or ndarray Correlation value Returns ---------- result : int or ndarray Fisher's z-transformed correlation value
[ "Function", "that", "calculates", "the", "Fisher", "z", "-", "transformation" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L287-L304
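A quick numeric check of the transform: z = 0.5 * (ln(1 + r) - ln(1 - r)) is the inverse hyperbolic tangent, so numpy.arctanh gives identical values.

import numpy as np

r = np.array([0.0, 0.5, 0.9])
z = 0.5 * (np.log(1 + r) - np.log(1 - r))
print(np.allclose(z, np.arctanh(r)))  # True: r2z is artanh
print(z)                              # approx. [0.     0.5493 1.4722]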
ContextLab/quail
quail/helpers.py
z2r
def z2r(z): """ Function that calculates the inverse Fisher z-transformation Parameters ---------- z : int or ndarray Fishers z transformed correlation value Returns ---------- result : int or ndarray Correlation value """ with np.errstate(invalid='ignore', divide='ignore'): return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
python
def z2r(z): """ Function that calculates the inverse Fisher z-transformation Parameters ---------- z : int or ndarray Fishers z transformed correlation value Returns ---------- result : int or ndarray Correlation value """ with np.errstate(invalid='ignore', divide='ignore'): return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
[ "def", "z2r", "(", "z", ")", ":", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ",", "divide", "=", "'ignore'", ")", ":", "return", "(", "np", ".", "exp", "(", "2", "*", "z", ")", "-", "1", ")", "/", "(", "np", ".", "exp", "(", "2", "*", "z", ")", "+", "1", ")" ]
Function that calculates the inverse Fisher z-transformation Parameters ---------- z : int or ndarray Fisher's z-transformed correlation value Returns ---------- result : int or ndarray Correlation value
[ "Function", "that", "calculates", "the", "inverse", "Fisher", "z", "-", "transformation" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L306-L323
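Round-trip check that z2r inverts r2z: (exp(2z) - 1) / (exp(2z) + 1) is the hyperbolic tangent, so mapping correlations through artanh and back recovers them exactly.

import numpy as np

r = np.array([-0.8, 0.0, 0.3, 0.95])
z = np.arctanh(r)                             # forward Fisher z (r2z)
back = (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
print(np.allclose(back, r))                   # True
print(np.allclose(back, np.tanh(z)))          # True: z2r is tanh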
ContextLab/quail
quail/helpers.py
shuffle_egg
def shuffle_egg(egg): """ Shuffle an Egg's recalls""" from .egg import Egg pres, rec, features, dist_funcs = parse_egg(egg) if pres.ndim==1: pres = pres.reshape(1, pres.shape[0]) rec = rec.reshape(1, rec.shape[0]) features = features.reshape(1, features.shape[0]) for ilist in range(rec.shape[0]): idx = np.random.permutation(rec.shape[1]) rec[ilist,:] = rec[ilist,idx] return Egg(pres=pres, rec=rec, features=features, dist_funcs=dist_funcs)
python
def shuffle_egg(egg): """ Shuffle an Egg's recalls""" from .egg import Egg pres, rec, features, dist_funcs = parse_egg(egg) if pres.ndim==1: pres = pres.reshape(1, pres.shape[0]) rec = rec.reshape(1, rec.shape[0]) features = features.reshape(1, features.shape[0]) for ilist in range(rec.shape[0]): idx = np.random.permutation(rec.shape[1]) rec[ilist,:] = rec[ilist,idx] return Egg(pres=pres, rec=rec, features=features, dist_funcs=dist_funcs)
[ "def", "shuffle_egg", "(", "egg", ")", ":", "from", ".", "egg", "import", "Egg", "pres", ",", "rec", ",", "features", ",", "dist_funcs", "=", "parse_egg", "(", "egg", ")", "if", "pres", ".", "ndim", "==", "1", ":", "pres", "=", "pres", ".", "reshape", "(", "1", ",", "pres", ".", "shape", "[", "0", "]", ")", "rec", "=", "rec", ".", "reshape", "(", "1", ",", "rec", ".", "shape", "[", "0", "]", ")", "features", "=", "features", ".", "reshape", "(", "1", ",", "features", ".", "shape", "[", "0", "]", ")", "for", "ilist", "in", "range", "(", "rec", ".", "shape", "[", "0", "]", ")", ":", "idx", "=", "np", ".", "random", ".", "permutation", "(", "rec", ".", "shape", "[", "1", "]", ")", "rec", "[", "ilist", ",", ":", "]", "=", "rec", "[", "ilist", ",", "idx", "]", "return", "Egg", "(", "pres", "=", "pres", ",", "rec", "=", "rec", ",", "features", "=", "features", ",", "dist_funcs", "=", "dist_funcs", ")" ]
Shuffle an Egg's recalls
[ "Shuffle", "an", "Egg", "s", "recalls" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/helpers.py#L335-L349
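The core of the shuffle minus the Egg plumbing: each row of the recall matrix is permuted independently with numpy. The toy matrix is illustrative.

import numpy as np

rec = np.array([['dog', 'cat', 'pig'],
                ['sun', 'sky', 'sea']])
for ilist in range(rec.shape[0]):
    idx = np.random.permutation(rec.shape[1])  # fresh permutation per list
    rec[ilist, :] = rec[ilist, idx]
print(rec)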
aouyar/PyMunin
pymunin/plugins/netifacestats.py
MuninNetIfacePlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" for iface in self._ifaceList: stats = self._ifaceStats.get(iface) graph_name = 'netiface_traffic_%s' % iface if self.hasGraph(graph_name): self.setGraphVal(graph_name, 'rx', stats.get('rxbytes') * 8) self.setGraphVal(graph_name, 'tx', stats.get('txbytes') * 8) graph_name = 'netiface_errors_%s' % iface if self.hasGraph(graph_name): for field in ('rxerrs', 'txerrs', 'rxframe', 'txcarrier', 'rxdrop', 'txdrop', 'rxfifo', 'txfifo'): self.setGraphVal(graph_name, field, stats.get(field))
python
def retrieveVals(self): """Retrieve values for graphs.""" for iface in self._ifaceList: stats = self._ifaceStats.get(iface) graph_name = 'netiface_traffic_%s' % iface if self.hasGraph(graph_name): self.setGraphVal(graph_name, 'rx', stats.get('rxbytes') * 8) self.setGraphVal(graph_name, 'tx', stats.get('txbytes') * 8) graph_name = 'netiface_errors_%s' % iface if self.hasGraph(graph_name): for field in ('rxerrs', 'txerrs', 'rxframe', 'txcarrier', 'rxdrop', 'txdrop', 'rxfifo', 'txfifo'): self.setGraphVal(graph_name, field, stats.get(field))
[ "def", "retrieveVals", "(", "self", ")", ":", "for", "iface", "in", "self", ".", "_ifaceList", ":", "stats", "=", "self", ".", "_ifaceStats", ".", "get", "(", "iface", ")", "graph_name", "=", "'netiface_traffic_%s'", "%", "iface", "if", "self", ".", "hasGraph", "(", "graph_name", ")", ":", "self", ".", "setGraphVal", "(", "graph_name", ",", "'rx'", ",", "stats", ".", "get", "(", "'rxbytes'", ")", "*", "8", ")", "self", ".", "setGraphVal", "(", "graph_name", ",", "'tx'", ",", "stats", ".", "get", "(", "'txbytes'", ")", "*", "8", ")", "graph_name", "=", "'netiface_errors_%s'", "%", "iface", "if", "self", ".", "hasGraph", "(", "graph_name", ")", ":", "for", "field", "in", "(", "'rxerrs'", ",", "'txerrs'", ",", "'rxframe'", ",", "'txcarrier'", ",", "'rxdrop'", ",", "'txdrop'", ",", "'rxfifo'", ",", "'txfifo'", ")", ":", "self", ".", "setGraphVal", "(", "graph_name", ",", "field", ",", "stats", ".", "get", "(", "field", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/netifacestats.py#L122-L134
aouyar/PyMunin
pysysinfo/ntp.py
NTPinfo.getPeerStats
def getPeerStats(self): """Get NTP Peer Stats for localhost by querying local NTP Server. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpqCmd, '-n', '-c', 'peers']) for line in output.splitlines(): mobj = re.match('\*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+', line) if mobj: info_dict['ip'] = mobj.group(1) cols = line.split() info_dict['stratum'] = int(cols[2]) info_dict['delay'] = float(cols[7]) / 1000.0 info_dict['offset'] = float(cols[8]) / 1000.0 info_dict['jitter'] = float(cols[9]) / 1000.0 return info_dict else: raise Exception("Execution of command failed: %s" % ntpqCmd) return info_dict
python
def getPeerStats(self): """Get NTP Peer Stats for localhost by querying local NTP Server. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpqCmd, '-n', '-c', 'peers']) for line in output.splitlines(): mobj = re.match('\*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+', line) if mobj: info_dict['ip'] = mobj.group(1) cols = line.split() info_dict['stratum'] = int(cols[2]) info_dict['delay'] = float(cols[7]) / 1000.0 info_dict['offset'] = float(cols[8]) / 1000.0 info_dict['jitter'] = float(cols[9]) / 1000.0 return info_dict else: raise Exception("Execution of command failed: %s" % ntpqCmd) return info_dict
[ "def", "getPeerStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "output", "=", "util", ".", "exec_command", "(", "[", "ntpqCmd", ",", "'-n'", ",", "'-c'", ",", "'peers'", "]", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'\\*(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\s+'", ",", "line", ")", "if", "mobj", ":", "info_dict", "[", "'ip'", "]", "=", "mobj", ".", "group", "(", "1", ")", "cols", "=", "line", ".", "split", "(", ")", "info_dict", "[", "'stratum'", "]", "=", "int", "(", "cols", "[", "2", "]", ")", "info_dict", "[", "'delay'", "]", "=", "float", "(", "cols", "[", "7", "]", ")", "/", "1000.0", "info_dict", "[", "'offset'", "]", "=", "float", "(", "cols", "[", "8", "]", ")", "/", "1000.0", "info_dict", "[", "'jitter'", "]", "=", "float", "(", "cols", "[", "9", "]", ")", "/", "1000.0", "return", "info_dict", "else", ":", "raise", "Exception", "(", "\"Execution of command failed: %s\"", "%", "ntpqCmd", ")", "return", "info_dict" ]
Get NTP Peer Stats for localhost by querying local NTP Server. @return: Dictionary of NTP stats converted to seconds.
[ "Get", "NTP", "Peer", "Stats", "for", "localhost", "by", "querying", "local", "NTP", "Server", ".", "@return", ":", "Dictionary", "of", "NTP", "stats", "converted", "to", "seconds", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/ntp.py#L29-L49
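The parser keys off the ntpq 'peers' billboard, where a leading '*' marks the selected sync peer and delay/offset/jitter are reported in milliseconds. A self-contained sketch against a made-up sample line (not captured from a live server):

import re

line = ('*192.168.1.10    203.0.113.5      2 u   64  128  377'
        '    0.420   -0.113   0.058')
mobj = re.match(r'\*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+', line)
if mobj:
    cols = line.split()
    peer = {'ip': mobj.group(1),
            'stratum': int(cols[2]),
            'delay': float(cols[7]) / 1000.0,   # ms -> s
            'offset': float(cols[8]) / 1000.0,
            'jitter': float(cols[9]) / 1000.0}
    print(peer)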
aouyar/PyMunin
pysysinfo/ntp.py
NTPinfo.getHostOffset
def getHostOffset(self, host): """Get NTP Stats and offset of remote host relative to localhost by querying NTP Server on remote host. @param host: Remote Host IP. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpdateCmd, '-u', '-q', host]) for line in output.splitlines(): mobj = re.match('server.*,\s*stratum\s+(\d),.*' 'offset\s+([\d\.-]+),.*delay\s+([\d\.]+)\s*$', line) if mobj: info_dict['stratum'] = int(mobj.group(1)) info_dict['delay'] = float(mobj.group(3)) info_dict['offset'] = float(mobj.group(2)) return info_dict return info_dict
python
def getHostOffset(self, host): """Get NTP Stats and offset of remote host relative to localhost by querying NTP Server on remote host. @param host: Remote Host IP. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpdateCmd, '-u', '-q', host]) for line in output.splitlines(): mobj = re.match('server.*,\s*stratum\s+(\d),.*' 'offset\s+([\d\.-]+),.*delay\s+([\d\.]+)\s*$', line) if mobj: info_dict['stratum'] = int(mobj.group(1)) info_dict['delay'] = float(mobj.group(3)) info_dict['offset'] = float(mobj.group(2)) return info_dict return info_dict
[ "def", "getHostOffset", "(", "self", ",", "host", ")", ":", "info_dict", "=", "{", "}", "output", "=", "util", ".", "exec_command", "(", "[", "ntpdateCmd", ",", "'-u'", ",", "'-q'", ",", "host", "]", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'server.*,\\s*stratum\\s+(\\d),.*'", "'offset\\s+([\\d\\.-]+),.*delay\\s+([\\d\\.]+)\\s*$'", ",", "line", ")", "if", "mobj", ":", "info_dict", "[", "'stratum'", "]", "=", "int", "(", "mobj", ".", "group", "(", "1", ")", ")", "info_dict", "[", "'delay'", "]", "=", "float", "(", "mobj", ".", "group", "(", "3", ")", ")", "info_dict", "[", "'offset'", "]", "=", "float", "(", "mobj", ".", "group", "(", "2", ")", ")", "return", "info_dict", "return", "info_dict" ]
Get NTP Stats and offset of remote host relative to localhost by querying NTP Server on remote host. @param host: Remote Host IP. @return: Dictionary of NTP stats converted to seconds.
[ "Get", "NTP", "Stats", "and", "offset", "of", "remote", "host", "relative", "to", "localhost", "by", "querying", "NTP", "Server", "on", "remote", "host", ".", "@param", "host", ":", "Remote", "Host", "IP", ".", "@return", ":", "Dictionary", "of", "NTP", "stats", "converted", "to", "seconds", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/ntp.py#L51-L70
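The single-host query parses the one-line summary that ntpdate -q prints; unlike ntpq, offset and delay here are already in seconds. A sketch exercising the same regex on a made-up sample line:

import re

line = 'server 192.0.2.1, stratum 2, offset -0.003412, delay 0.02571'
mobj = re.match(r'server.*,\s*stratum\s+(\d),.*'
                r'offset\s+([\d\.-]+),.*delay\s+([\d\.]+)\s*$', line)
if mobj:
    print({'stratum': int(mobj.group(1)),
           'offset': float(mobj.group(2)),
           'delay': float(mobj.group(3))})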
aouyar/PyMunin
pysysinfo/ntp.py
NTPinfo.getHostOffsets
def getHostOffsets(self, hosts): """Get NTP Stats and offset of multiple remote hosts relative to localhost by querying NTP Servers on remote hosts. @param host: List of Remote Host IPs. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpdateCmd, '-u', '-q'] + list(hosts)) for line in output.splitlines(): mobj = re.match('server\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}),' '\s*stratum\s+(\d),.*offset\s+([\d\.-]+),' '.*delay\s+([\d\.]+)\s*$', line) if mobj: host_dict = {} host = mobj.group(1) host_dict['stratum'] = int(mobj.group(2)) host_dict['delay'] = float(mobj.group(4)) host_dict['offset'] = float(mobj.group(3)) info_dict[host] = host_dict return info_dict
python
def getHostOffsets(self, hosts): """Get NTP Stats and offset of multiple remote hosts relative to localhost by querying NTP Servers on remote hosts. @param host: List of Remote Host IPs. @return: Dictionary of NTP stats converted to seconds. """ info_dict = {} output = util.exec_command([ntpdateCmd, '-u', '-q'] + list(hosts)) for line in output.splitlines(): mobj = re.match('server\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}),' '\s*stratum\s+(\d),.*offset\s+([\d\.-]+),' '.*delay\s+([\d\.]+)\s*$', line) if mobj: host_dict = {} host = mobj.group(1) host_dict['stratum'] = int(mobj.group(2)) host_dict['delay'] = float(mobj.group(4)) host_dict['offset'] = float(mobj.group(3)) info_dict[host] = host_dict return info_dict
[ "def", "getHostOffsets", "(", "self", ",", "hosts", ")", ":", "info_dict", "=", "{", "}", "output", "=", "util", ".", "exec_command", "(", "[", "ntpdateCmd", ",", "'-u'", ",", "'-q'", "]", "+", "list", "(", "hosts", ")", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'server\\s+(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}),'", "'\\s*stratum\\s+(\\d),.*offset\\s+([\\d\\.-]+),'", "'.*delay\\s+([\\d\\.]+)\\s*$'", ",", "line", ")", "if", "mobj", ":", "host_dict", "=", "{", "}", "host", "=", "mobj", ".", "group", "(", "1", ")", "host_dict", "[", "'stratum'", "]", "=", "int", "(", "mobj", ".", "group", "(", "2", ")", ")", "host_dict", "[", "'delay'", "]", "=", "float", "(", "mobj", ".", "group", "(", "4", ")", ")", "host_dict", "[", "'offset'", "]", "=", "float", "(", "mobj", ".", "group", "(", "3", ")", ")", "info_dict", "[", "host", "]", "=", "host_dict", "return", "info_dict" ]
Get NTP Stats and offset of multiple remote hosts relative to localhost by querying NTP Servers on remote hosts. @param hosts: List of Remote Host IPs. @return: Dictionary of NTP stats converted to seconds.
[ "Get", "NTP", "Stats", "and", "offset", "of", "multiple", "remote", "hosts", "relative", "to", "localhost", "by", "querying", "NTP", "Servers", "on", "remote", "hosts", ".", "@param", "host", ":", "List", "of", "Remote", "Host", "IPs", ".", "@return", ":", "Dictionary", "of", "NTP", "stats", "converted", "to", "seconds", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/ntp.py#L72-L93
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getUptime
def getUptime(self): """Return system uptime in seconds. @return: Float that represents uptime in seconds. """ try: fp = open(uptimeFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % uptimeFile) return float(line.split()[0])
python
def getUptime(self): """Return system uptime in seconds. @return: Float that represents uptime in seconds. """ try: fp = open(uptimeFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % uptimeFile) return float(line.split()[0])
[ "def", "getUptime", "(", "self", ")", ":", "try", ":", "fp", "=", "open", "(", "uptimeFile", ",", "'r'", ")", "line", "=", "fp", ".", "readline", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "uptimeFile", ")", "return", "float", "(", "line", ".", "split", "(", ")", "[", "0", "]", ")" ]
Return system uptime in seconds. @return: Float that represents uptime in seconds.
[ "Return", "system", "uptime", "in", "seconds", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L46-L58
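uptimeFile here is presumably /proc/uptime, which holds two floats: seconds since boot and aggregate idle time; the function returns the first. A Linux-only sketch:

with open('/proc/uptime') as fp:
    uptime, idle = (float(v) for v in fp.readline().split())
print('up %.0f s (%.1f days)' % (uptime, uptime / 86400.0))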
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getLoadAvg
def getLoadAvg(self): """Return system Load Average. @return: List of 1 min, 5 min and 15 min Load Average figures. """ try: fp = open(loadavgFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % loadavgFile) arr = line.split() if len(arr) >= 3: return [float(col) for col in arr[:3]] else: return None
python
def getLoadAvg(self): """Return system Load Average. @return: List of 1 min, 5 min and 15 min Load Average figures. """ try: fp = open(loadavgFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % loadavgFile) arr = line.split() if len(arr) >= 3: return [float(col) for col in arr[:3]] else: return None
[ "def", "getLoadAvg", "(", "self", ")", ":", "try", ":", "fp", "=", "open", "(", "loadavgFile", ",", "'r'", ")", "line", "=", "fp", ".", "readline", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "loadavgFile", ")", "arr", "=", "line", ".", "split", "(", ")", "if", "len", "(", "arr", ")", ">=", "3", ":", "return", "[", "float", "(", "col", ")", "for", "col", "in", "arr", "[", ":", "3", "]", "]", "else", ":", "return", "None" ]
Return system Load Average. @return: List of 1 min, 5 min and 15 min Load Average figures.
[ "Return", "system", "Load", "Average", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L60-L76
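On platforms where it is available, the standard library exposes the same three figures directly, without reading /proc:

import os

one, five, fifteen = os.getloadavg()  # 1, 5 and 15 minute load averages
print(one, five, fifteen)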
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getCPUuse
def getCPUuse(self): """Return cpu time utilization in seconds. @return: Dictionary of stats. """ hz = os.sysconf('SC_CLK_TCK') info_dict = {} try: fp = open(cpustatFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % cpustatFile) headers = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest'] arr = line.split() if len(arr) > 1 and arr[0] == 'cpu': return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]])) return info_dict
python
def getCPUuse(self): """Return cpu time utilization in seconds. @return: Dictionary of stats. """ hz = os.sysconf('SC_CLK_TCK') info_dict = {} try: fp = open(cpustatFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % cpustatFile) headers = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest'] arr = line.split() if len(arr) > 1 and arr[0] == 'cpu': return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]])) return info_dict
[ "def", "getCPUuse", "(", "self", ")", ":", "hz", "=", "os", ".", "sysconf", "(", "'SC_CLK_TCK'", ")", "info_dict", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "cpustatFile", ",", "'r'", ")", "line", "=", "fp", ".", "readline", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "cpustatFile", ")", "headers", "=", "[", "'user'", ",", "'nice'", ",", "'system'", ",", "'idle'", ",", "'iowait'", ",", "'irq'", ",", "'softirq'", ",", "'steal'", ",", "'guest'", "]", "arr", "=", "line", ".", "split", "(", ")", "if", "len", "(", "arr", ")", ">", "1", "and", "arr", "[", "0", "]", "==", "'cpu'", ":", "return", "dict", "(", "zip", "(", "headers", "[", "0", ":", "len", "(", "arr", ")", "]", ",", "[", "(", "float", "(", "t", ")", "/", "hz", ")", "for", "t", "in", "arr", "[", "1", ":", "]", "]", ")", ")", "return", "info_dict" ]
Return cpu time utilization in seconds.

@return: Dictionary of stats.
[ "Return", "cpu", "time", "utilization", "in", "seconds", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L78-L96
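The counters returned are cumulative CPU seconds since boot, so a utilization percentage requires two samples and a delta. A sketch of that pattern (hypothetical usage; counting iowait as idle time is a judgment call, not something the module prescribes):

    import time
    from pysysinfo.system import SystemInfo

    sysinfo = SystemInfo()
    s1 = sysinfo.getCPUuse()
    time.sleep(5)
    s2 = sysinfo.getCPUuse()
    delta = dict((k, s2[k] - s1[k]) for k in s2)   # per-state seconds in the interval
    idle = delta.get('idle', 0) + delta.get('iowait', 0)
    busy = sum(delta.values()) - idle
    print('cpu busy: %.1f%%' % (100.0 * busy / (busy + idle)))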
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getProcessStats
def getProcessStats(self):
        """Return stats for running and blocked processes, forks,
        context switches and interrupts.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(cpustatFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % cpustatFile)
        for line in data.splitlines():
            arr = line.split()
            if len(arr) > 1 and arr[0] in ('ctxt', 'intr', 'softirq',
                                           'processes', 'procs_running',
                                           'procs_blocked'):
                info_dict[arr[0]] = arr[1]
        return info_dict
python
def getProcessStats(self):
        """Return stats for running and blocked processes, forks,
        context switches and interrupts.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(cpustatFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % cpustatFile)
        for line in data.splitlines():
            arr = line.split()
            if len(arr) > 1 and arr[0] in ('ctxt', 'intr', 'softirq',
                                           'processes', 'procs_running',
                                           'procs_blocked'):
                info_dict[arr[0]] = arr[1]
        return info_dict
[ "def", "getProcessStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "cpustatFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "cpustatFile", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "arr", "=", "line", ".", "split", "(", ")", "if", "len", "(", "arr", ")", ">", "1", "and", "arr", "[", "0", "]", "in", "(", "'ctxt'", ",", "'intr'", ",", "'softirq'", ",", "'processes'", ",", "'procs_running'", ",", "'procs_blocked'", ")", ":", "info_dict", "[", "arr", "[", "0", "]", "]", "=", "arr", "[", "1", "]", "return", "info_dict" ]
Return stats for running and blocked processes, forks,
context switches and interrupts.

@return: Dictionary of stats.
[ "Return", "stats", "for", "running", "and", "blocked", "processes", "forks", "context", "switches", "and", "interrupts", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L98-L118
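Usage sketch (note that the values come back as strings, exactly as read from /proc/stat; same import-path and constructor assumptions as the earlier sketches):

    from pysysinfo.system import SystemInfo

    pstats = SystemInfo().getProcessStats()
    print('context switches: %s' % pstats.get('ctxt'))
    print('running/blocked: %s/%s' % (pstats.get('procs_running'),
                                      pstats.get('procs_blocked')))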
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getMemoryUse
def getMemoryUse(self):
        """Return stats for memory utilization.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(meminfoFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % meminfoFile)
        for line in data.splitlines():
            mobj = re.match('^(.+):\s*(\d+)\s*(\w+|)\s*$', line)
            if mobj:
                if mobj.group(3).lower() == 'kb':
                    mult = 1024
                else:
                    mult = 1
                info_dict[mobj.group(1)] = int(mobj.group(2)) * mult
        return info_dict
python
def getMemoryUse(self):
        """Return stats for memory utilization.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(meminfoFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % meminfoFile)
        for line in data.splitlines():
            mobj = re.match('^(.+):\s*(\d+)\s*(\w+|)\s*$', line)
            if mobj:
                if mobj.group(3).lower() == 'kb':
                    mult = 1024
                else:
                    mult = 1
                info_dict[mobj.group(1)] = int(mobj.group(2)) * mult
        return info_dict
[ "def", "getMemoryUse", "(", "self", ")", ":", "info_dict", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "meminfoFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "meminfoFile", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'^(.+):\\s*(\\d+)\\s*(\\w+|)\\s*$'", ",", "line", ")", "if", "mobj", ":", "if", "mobj", ".", "group", "(", "3", ")", ".", "lower", "(", ")", "==", "'kb'", ":", "mult", "=", "1024", "else", ":", "mult", "=", "1", "info_dict", "[", "mobj", ".", "group", "(", "1", ")", "]", "=", "int", "(", "mobj", ".", "group", "(", "2", ")", ")", "*", "mult", "return", "info_dict" ]
Return stats for memory utilization.

@return: Dictionary of stats.
[ "Return", "stats", "for", "memory", "utilization", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L120-L141
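Usage sketch (assumes a Linux /proc/meminfo, whose standard MemTotal/MemFree keys the method exposes; the kB multiplier in the code normalizes values to bytes):

    from pysysinfo.system import SystemInfo

    mem = SystemInfo().getMemoryUse()
    total, free = mem.get('MemTotal'), mem.get('MemFree')
    if total and free:
        print('memory used: %.1f%%' % (100.0 * (total - free) / total))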
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getSwapStats
def getSwapStats(self):
        """Return information on swap partition and / or files.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(swapsFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % swapsFile)
        lines = data.splitlines()
        if len(lines) > 1:
            colnames = [name.lower() for name in lines[0].split()]
            for line in lines[1:]:
                cols = line.split()
                info_dict[cols[0]] = dict(zip(colnames[1:], cols[1:]))
        return info_dict
python
def getSwapStats(self):
        """Return information on swap partition and / or files.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(swapsFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % swapsFile)
        lines = data.splitlines()
        if len(lines) > 1:
            colnames = [name.lower() for name in lines[0].split()]
            for line in lines[1:]:
                cols = line.split()
                info_dict[cols[0]] = dict(zip(colnames[1:], cols[1:]))
        return info_dict
[ "def", "getSwapStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "swapsFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "swapsFile", ")", "lines", "=", "data", ".", "splitlines", "(", ")", "if", "len", "(", "lines", ")", ">", "1", ":", "colnames", "=", "[", "name", ".", "lower", "(", ")", "for", "name", "in", "lines", "[", "0", "]", ".", "split", "(", ")", "]", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "cols", "=", "line", ".", "split", "(", ")", "info_dict", "[", "cols", "[", "0", "]", "]", "=", "dict", "(", "zip", "(", "colnames", "[", "1", ":", "]", ",", "cols", "[", "1", ":", "]", ")", ")", "return", "info_dict" ]
Return information on swap partition and / or files.

@return: Dictionary of stats.
[ "Return", "information", "on", "swap", "partition", "and", "/", "or", "files", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L143-L162
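Usage sketch (the inner keys mirror the lower-cased /proc/swaps header columns: type, size, used, priority; all values are strings):

    from pysysinfo.system import SystemInfo

    for dev, info in SystemInfo().getSwapStats().items():
        print('%s size=%s used=%s' % (dev, info.get('size'), info.get('used')))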
aouyar/PyMunin
pysysinfo/system.py
SystemInfo.getVMstats
def getVMstats(self):
        """Return stats for Virtual Memory Subsystem.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(vmstatFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % vmstatFile)
        for line in data.splitlines():
            cols = line.split()
            if len(cols) == 2:
                info_dict[cols[0]] = cols[1]
        return info_dict
python
def getVMstats(self):
        """Return stats for Virtual Memory Subsystem.
        
        @return: Dictionary of stats.
        
        """
        info_dict = {}
        try:
            fp = open(vmstatFile, 'r')
            data = fp.read()
            fp.close()
        except:
            raise IOError('Failed reading stats from file: %s' % vmstatFile)
        for line in data.splitlines():
            cols = line.split()
            if len(cols) == 2:
                info_dict[cols[0]] = cols[1]
        return info_dict
[ "def", "getVMstats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "try", ":", "fp", "=", "open", "(", "vmstatFile", ",", "'r'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "except", ":", "raise", "IOError", "(", "'Failed reading stats from file: %s'", "%", "vmstatFile", ")", "for", "line", "in", "data", ".", "splitlines", "(", ")", ":", "cols", "=", "line", ".", "split", "(", ")", "if", "len", "(", "cols", ")", "==", "2", ":", "info_dict", "[", "cols", "[", "0", "]", "]", "=", "cols", "[", "1", "]", "return", "info_dict" ]
Return stats for Virtual Memory Subsystem.

@return: Dictionary of stats.
[ "Return", "stats", "for", "Virtual", "Memory", "Subsystem", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L164-L181
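Usage sketch (pswpin/pswpout are standard /proc/vmstat counter names; values are returned as strings):

    from pysysinfo.system import SystemInfo

    vm = SystemInfo().getVMstats()
    print('pages swapped in/out: %s/%s' % (vm.get('pswpin'), vm.get('pswpout')))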
aouyar/PyMunin
pysysinfo/memcached.py
MemcachedInfo._connect
def _connect(self):
        """Connect to Memcached."""
        if self._socketFile is not None:
            if not os.path.exists(self._socketFile):
                raise Exception("Socket file (%s) for Memcached Instance not found."
                                % self._socketFile)
        try:
            if self._timeout is not None:
                self._conn = util.Telnet(self._host, self._port,
                                         self._socketFile, self._timeout)
            else:
                self._conn = util.Telnet(self._host, self._port,
                                         self._socketFile)
        except:
            raise Exception("Connection to %s failed." % self._instanceName)
python
def _connect(self):
        """Connect to Memcached."""
        if self._socketFile is not None:
            if not os.path.exists(self._socketFile):
                raise Exception("Socket file (%s) for Memcached Instance not found."
                                % self._socketFile)
        try:
            if self._timeout is not None:
                self._conn = util.Telnet(self._host, self._port,
                                         self._socketFile, self._timeout)
            else:
                self._conn = util.Telnet(self._host, self._port,
                                         self._socketFile)
        except:
            raise Exception("Connection to %s failed." % self._instanceName)
[ "def", "_connect", "(", "self", ")", ":", "if", "self", ".", "_socketFile", "is", "not", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_socketFile", ")", ":", "raise", "Exception", "(", "\"Socket file (%s) for Memcached Instance not found.\"", "%", "self", ".", "_socketFile", ")", "try", ":", "if", "self", ".", "_timeout", "is", "not", "None", ":", "self", ".", "_conn", "=", "util", ".", "Telnet", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_socketFile", ",", "timeout", ")", "else", ":", "self", ".", "_conn", "=", "util", ".", "Telnet", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_socketFile", ")", "except", ":", "raise", "Exception", "(", "\"Connection to %s failed.\"", "%", "self", ".", "_instanceName", ")" ]
Connect to Memcached.
[ "Connect", "to", "Memcached", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/memcached.py#L66-L79
aouyar/PyMunin
pysysinfo/memcached.py
MemcachedInfo._sendStatCmd
def _sendStatCmd(self, cmd):
        """Send stat command to Memcached Server and return response lines.
        
        @param cmd: Command string.
        @return:    Array of strings.
        
        """
        try:
            self._conn.write("%s\r\n" % cmd)
            regex = re.compile('^(END|ERROR)\r\n', re.MULTILINE)
            (idx, mobj, text) = self._conn.expect([regex,], self._timeout) #@UnusedVariable
        except:
            raise Exception("Communication with %s failed" % self._instanceName)
        if mobj is not None:
            if mobj.group(1) == 'END':
                return text.splitlines()[:-1]
            elif mobj.group(1) == 'ERROR':
                raise Exception("Protocol error in communication with %s."
                                % self._instanceName)
        else:
            raise Exception("Connection with %s timed out." % self._instanceName)
python
def _sendStatCmd(self, cmd):
        """Send stat command to Memcached Server and return response lines.
        
        @param cmd: Command string.
        @return:    Array of strings.
        
        """
        try:
            self._conn.write("%s\r\n" % cmd)
            regex = re.compile('^(END|ERROR)\r\n', re.MULTILINE)
            (idx, mobj, text) = self._conn.expect([regex,], self._timeout) #@UnusedVariable
        except:
            raise Exception("Communication with %s failed" % self._instanceName)
        if mobj is not None:
            if mobj.group(1) == 'END':
                return text.splitlines()[:-1]
            elif mobj.group(1) == 'ERROR':
                raise Exception("Protocol error in communication with %s."
                                % self._instanceName)
        else:
            raise Exception("Connection with %s timed out." % self._instanceName)
[ "def", "_sendStatCmd", "(", "self", ",", "cmd", ")", ":", "try", ":", "self", ".", "_conn", ".", "write", "(", "\"%s\\r\\n\"", "%", "cmd", ")", "regex", "=", "re", ".", "compile", "(", "'^(END|ERROR)\\r\\n'", ",", "re", ".", "MULTILINE", ")", "(", "idx", ",", "mobj", ",", "text", ")", "=", "self", ".", "_conn", ".", "expect", "(", "[", "regex", ",", "]", ",", "self", ".", "_timeout", ")", "#@UnusedVariable", "except", ":", "raise", "Exception", "(", "\"Communication with %s failed\"", "%", "self", ".", "_instanceName", ")", "if", "mobj", "is", "not", "None", ":", "if", "mobj", ".", "group", "(", "1", ")", "==", "'END'", ":", "return", "text", ".", "splitlines", "(", ")", "[", ":", "-", "1", "]", "elif", "mobj", ".", "group", "(", "1", ")", "==", "'ERROR'", ":", "raise", "Exception", "(", "\"Protocol error in communication with %s.\"", "%", "self", ".", "_instanceName", ")", "else", ":", "raise", "Exception", "(", "\"Connection with %s timed out.\"", "%", "self", ".", "_instanceName", ")" ]
Send stat command to Memcached Server and return response lines.

@param cmd: Command string.
@return:    Array of strings.
[ "Send", "stat", "command", "to", "Memcached", "Server", "and", "return", "response", "lines", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/memcached.py#L81-L101
aouyar/PyMunin
pysysinfo/memcached.py
MemcachedInfo._parseStats
def _parseStats(self, lines, parse_slabs = False):
        """Parse stats output from memcached and return dictionary of stats.
        
        @param lines:       Array of lines of input text.
        @param parse_slabs: Parse slab stats if True.
        @return:            Stats dictionary.
        
        """
        info_dict = {}
        info_dict['slabs'] = {}
        for line in lines:
            mobj = re.match('^STAT\s(\w+)\s(\S+)$', line)
            if mobj:
                info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True)
                continue
            elif parse_slabs:
                mobj = re.match('STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line)
                if mobj:
                    (slab, key, val) = mobj.groups()[-3:]
                    if not info_dict['slabs'].has_key(slab):
                        info_dict['slabs'][slab] = {}
                    info_dict['slabs'][slab][key] = util.parse_value(val, True)
        return info_dict
python
def _parseStats(self, lines, parse_slabs = False):
        """Parse stats output from memcached and return dictionary of stats.
        
        @param lines:       Array of lines of input text.
        @param parse_slabs: Parse slab stats if True.
        @return:            Stats dictionary.
        
        """
        info_dict = {}
        info_dict['slabs'] = {}
        for line in lines:
            mobj = re.match('^STAT\s(\w+)\s(\S+)$', line)
            if mobj:
                info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True)
                continue
            elif parse_slabs:
                mobj = re.match('STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line)
                if mobj:
                    (slab, key, val) = mobj.groups()[-3:]
                    if not info_dict['slabs'].has_key(slab):
                        info_dict['slabs'][slab] = {}
                    info_dict['slabs'][slab][key] = util.parse_value(val, True)
        return info_dict
[ "def", "_parseStats", "(", "self", ",", "lines", ",", "parse_slabs", "=", "False", ")", ":", "info_dict", "=", "{", "}", "info_dict", "[", "'slabs'", "]", "=", "{", "}", "for", "line", "in", "lines", ":", "mobj", "=", "re", ".", "match", "(", "'^STAT\\s(\\w+)\\s(\\S+)$'", ",", "line", ")", "if", "mobj", ":", "info_dict", "[", "mobj", ".", "group", "(", "1", ")", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ",", "True", ")", "continue", "elif", "parse_slabs", ":", "mobj", "=", "re", ".", "match", "(", "'STAT\\s(\\w+:)?(\\d+):(\\w+)\\s(\\S+)$'", ",", "line", ")", "if", "mobj", ":", "(", "slab", ",", "key", ",", "val", ")", "=", "mobj", ".", "groups", "(", ")", "[", "-", "3", ":", "]", "if", "not", "info_dict", "[", "'slabs'", "]", ".", "has_key", "(", "slab", ")", ":", "info_dict", "[", "'slabs'", "]", "[", "slab", "]", "=", "{", "}", "info_dict", "[", "'slabs'", "]", "[", "slab", "]", "[", "key", "]", "=", "util", ".", "parse_value", "(", "val", ",", "True", ")", "return", "info_dict" ]
Parse stats output from memcached and return dictionary of stats.

@param lines:       Array of lines of input text.
@param parse_slabs: Parse slab stats if True.
@return:            Stats dictionary.
[ "Parse", "stats", "output", "from", "memcached", "and", "return", "dictionary", "of", "stats", "-" ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/memcached.py#L102-L124
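A standalone sketch of the two regex passes the method applies, run against sample lines in the format the memcached stats commands return (the sample lines are illustrative):

    import re

    lines = ['STAT curr_items 42', 'STAT bytes 1048576', 'STAT items:1:number 7']
    for line in lines:
        mobj = re.match('^STAT\s(\w+)\s(\S+)$', line)
        if mobj:
            # Global stat: key cannot contain ':', so slab lines fall through.
            print('global: %s = %s' % (mobj.group(1), mobj.group(2)))
            continue
        mobj = re.match('STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line)
        if mobj:
            print('slab: %s' % (mobj.groups()[-3:],))  # (slab_id, key, value)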
aouyar/PyMunin
pymunin/plugins/memcachedstats.py
MuninMemcachedPlugin.retrieveVals
def retrieveVals(self):
        """Retrieve values for graphs."""
        if self._stats is None:
            serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
            stats = serverInfo.getStats()
        else:
            stats = self._stats
        if stats is None:
            raise Exception("Undetermined error accessing stats.")
        stats['set_hits'] = stats.get('total_items')
        if stats.has_key('cmd_set') and stats.has_key('total_items'):
            stats['set_misses'] = stats['cmd_set'] - stats['total_items']
        self.saveState(stats)
        if self.hasGraph('memcached_connections'):
            self.setGraphVal('memcached_connections', 'conn', stats.get('curr_connections'))
        if self.hasGraph('memcached_items'):
            self.setGraphVal('memcached_items', 'items', stats.get('curr_items'))
        if self.hasGraph('memcached_memory'):
            self.setGraphVal('memcached_memory', 'bytes', stats.get('bytes'))
        if self.hasGraph('memcached_connrate'):
            self.setGraphVal('memcached_connrate', 'conn', stats.get('total_connections'))
        if self.hasGraph('memcached_traffic'):
            self.setGraphVal('memcached_traffic', 'rxbytes', stats.get('bytes_read'))
            self.setGraphVal('memcached_traffic', 'txbytes', stats.get('bytes_written'))
        if self.hasGraph('memcached_reqrate'):
            self.setGraphVal('memcached_reqrate', 'set', stats.get('cmd_set'))
            self.setGraphVal('memcached_reqrate', 'get', stats.get('cmd_get'))
            if self.graphHasField('memcached_reqrate', 'del'):
                self.setGraphVal('memcached_reqrate', 'del',
                                 safe_sum([stats.get('delete_hits'),
                                           stats.get('delete_misses')]))
            if self.graphHasField('memcached_reqrate', 'cas'):
                self.setGraphVal('memcached_reqrate', 'cas',
                                 safe_sum([stats.get('cas_hits'),
                                           stats.get('cas_misses'),
                                           stats.get('cas_badval')]))
            if self.graphHasField('memcached_reqrate', 'incr'):
                self.setGraphVal('memcached_reqrate', 'incr',
                                 safe_sum([stats.get('incr_hits'),
                                           stats.get('incr_misses')]))
            if self.graphHasField('memcached_reqrate', 'decr'):
                self.setGraphVal('memcached_reqrate', 'decr',
                                 safe_sum([stats.get('decr_hits'),
                                           stats.get('decr_misses')]))
        if self.hasGraph('memcached_statget'):
            self.setGraphVal('memcached_statget', 'hit', stats.get('get_hits'))
            self.setGraphVal('memcached_statget', 'miss', stats.get('get_misses'))
            self.setGraphVal('memcached_statget', 'total',
                             safe_sum([stats.get('get_hits'),
                                       stats.get('get_misses')]))
        if self.hasGraph('memcached_statset'):
            self.setGraphVal('memcached_statset', 'hit', stats.get('set_hits'))
            self.setGraphVal('memcached_statset', 'miss', stats.get('set_misses'))
            self.setGraphVal('memcached_statset', 'total',
                             safe_sum([stats.get('set_hits'),
                                       stats.get('set_misses')]))
        if self.hasGraph('memcached_statdel'):
            self.setGraphVal('memcached_statdel', 'hit', stats.get('delete_hits'))
            self.setGraphVal('memcached_statdel', 'miss', stats.get('delete_misses'))
            self.setGraphVal('memcached_statdel', 'total',
                             safe_sum([stats.get('delete_hits'),
                                       stats.get('delete_misses')]))
        if self.hasGraph('memcached_statcas'):
            self.setGraphVal('memcached_statcas', 'hit', stats.get('cas_hits'))
            self.setGraphVal('memcached_statcas', 'miss', stats.get('cas_misses'))
            self.setGraphVal('memcached_statcas', 'badval', stats.get('cas_badval'))
            self.setGraphVal('memcached_statcas', 'total',
                             safe_sum([stats.get('cas_hits'),
                                       stats.get('cas_misses'),
                                       stats.get('cas_badval')]))
        if self.hasGraph('memcached_statincrdecr'):
            self.setGraphVal('memcached_statincrdecr', 'incr_hit', stats.get('incr_hits'))
            self.setGraphVal('memcached_statincrdecr', 'decr_hit', stats.get('decr_hits'))
            self.setGraphVal('memcached_statincrdecr', 'incr_miss', stats.get('incr_misses'))
            self.setGraphVal('memcached_statincrdecr', 'decr_miss', stats.get('decr_misses'))
            self.setGraphVal('memcached_statincrdecr', 'total',
                             safe_sum([stats.get('incr_hits'),
                                       stats.get('decr_hits'),
                                       stats.get('incr_misses'),
                                       stats.get('decr_misses')]))
        if self.hasGraph('memcached_statevict'):
            self.setGraphVal('memcached_statevict', 'evict', stats.get('evictions'))
            if self.graphHasField('memcached_statevict', 'reclaim'):
                self.setGraphVal('memcached_statevict', 'reclaim', stats.get('reclaimed'))
        if self.hasGraph('memcached_statauth'):
            self.setGraphVal('memcached_statauth', 'reqs', stats.get('auth_cmds'))
            self.setGraphVal('memcached_statauth', 'errors', stats.get('auth_errors'))
        if self.hasGraph('memcached_hitpct'):
            prev_stats = self._prev_stats
            for (field_name, field_hits, field_misses) in (
                    ('set', 'set_hits', 'set_misses'),
                    ('get', 'get_hits', 'get_misses'),
                    ('del', 'delete_hits', 'delete_misses'),
                    ('cas', 'cas_hits', 'cas_misses'),
                    ('incr', 'incr_hits', 'incr_misses'),
                    ('decr', 'decr_hits', 'decr_misses')):
                if prev_stats:
                    if (stats.has_key(field_hits)
                            and prev_stats.has_key(field_hits)
                            and stats.has_key(field_misses)
                            and prev_stats.has_key(field_misses)):
                        hits = stats[field_hits] - prev_stats[field_hits]
                        misses = stats[field_misses] - prev_stats[field_misses]
                        total = hits + misses
                        if total > 0:
                            val = 100.0 * hits / total
                        else:
                            val = 0
                        self.setGraphVal('memcached_hitpct', field_name,
                                         round(val, 2))
python
def retrieveVals(self):
        """Retrieve values for graphs."""
        if self._stats is None:
            serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
            stats = serverInfo.getStats()
        else:
            stats = self._stats
        if stats is None:
            raise Exception("Undetermined error accessing stats.")
        stats['set_hits'] = stats.get('total_items')
        if stats.has_key('cmd_set') and stats.has_key('total_items'):
            stats['set_misses'] = stats['cmd_set'] - stats['total_items']
        self.saveState(stats)
        if self.hasGraph('memcached_connections'):
            self.setGraphVal('memcached_connections', 'conn', stats.get('curr_connections'))
        if self.hasGraph('memcached_items'):
            self.setGraphVal('memcached_items', 'items', stats.get('curr_items'))
        if self.hasGraph('memcached_memory'):
            self.setGraphVal('memcached_memory', 'bytes', stats.get('bytes'))
        if self.hasGraph('memcached_connrate'):
            self.setGraphVal('memcached_connrate', 'conn', stats.get('total_connections'))
        if self.hasGraph('memcached_traffic'):
            self.setGraphVal('memcached_traffic', 'rxbytes', stats.get('bytes_read'))
            self.setGraphVal('memcached_traffic', 'txbytes', stats.get('bytes_written'))
        if self.hasGraph('memcached_reqrate'):
            self.setGraphVal('memcached_reqrate', 'set', stats.get('cmd_set'))
            self.setGraphVal('memcached_reqrate', 'get', stats.get('cmd_get'))
            if self.graphHasField('memcached_reqrate', 'del'):
                self.setGraphVal('memcached_reqrate', 'del',
                                 safe_sum([stats.get('delete_hits'),
                                           stats.get('delete_misses')]))
            if self.graphHasField('memcached_reqrate', 'cas'):
                self.setGraphVal('memcached_reqrate', 'cas',
                                 safe_sum([stats.get('cas_hits'),
                                           stats.get('cas_misses'),
                                           stats.get('cas_badval')]))
            if self.graphHasField('memcached_reqrate', 'incr'):
                self.setGraphVal('memcached_reqrate', 'incr',
                                 safe_sum([stats.get('incr_hits'),
                                           stats.get('incr_misses')]))
            if self.graphHasField('memcached_reqrate', 'decr'):
                self.setGraphVal('memcached_reqrate', 'decr',
                                 safe_sum([stats.get('decr_hits'),
                                           stats.get('decr_misses')]))
        if self.hasGraph('memcached_statget'):
            self.setGraphVal('memcached_statget', 'hit', stats.get('get_hits'))
            self.setGraphVal('memcached_statget', 'miss', stats.get('get_misses'))
            self.setGraphVal('memcached_statget', 'total',
                             safe_sum([stats.get('get_hits'),
                                       stats.get('get_misses')]))
        if self.hasGraph('memcached_statset'):
            self.setGraphVal('memcached_statset', 'hit', stats.get('set_hits'))
            self.setGraphVal('memcached_statset', 'miss', stats.get('set_misses'))
            self.setGraphVal('memcached_statset', 'total',
                             safe_sum([stats.get('set_hits'),
                                       stats.get('set_misses')]))
        if self.hasGraph('memcached_statdel'):
            self.setGraphVal('memcached_statdel', 'hit', stats.get('delete_hits'))
            self.setGraphVal('memcached_statdel', 'miss', stats.get('delete_misses'))
            self.setGraphVal('memcached_statdel', 'total',
                             safe_sum([stats.get('delete_hits'),
                                       stats.get('delete_misses')]))
        if self.hasGraph('memcached_statcas'):
            self.setGraphVal('memcached_statcas', 'hit', stats.get('cas_hits'))
            self.setGraphVal('memcached_statcas', 'miss', stats.get('cas_misses'))
            self.setGraphVal('memcached_statcas', 'badval', stats.get('cas_badval'))
            self.setGraphVal('memcached_statcas', 'total',
                             safe_sum([stats.get('cas_hits'),
                                       stats.get('cas_misses'),
                                       stats.get('cas_badval')]))
        if self.hasGraph('memcached_statincrdecr'):
            self.setGraphVal('memcached_statincrdecr', 'incr_hit', stats.get('incr_hits'))
            self.setGraphVal('memcached_statincrdecr', 'decr_hit', stats.get('decr_hits'))
            self.setGraphVal('memcached_statincrdecr', 'incr_miss', stats.get('incr_misses'))
            self.setGraphVal('memcached_statincrdecr', 'decr_miss', stats.get('decr_misses'))
            self.setGraphVal('memcached_statincrdecr', 'total',
                             safe_sum([stats.get('incr_hits'),
                                       stats.get('decr_hits'),
                                       stats.get('incr_misses'),
                                       stats.get('decr_misses')]))
        if self.hasGraph('memcached_statevict'):
            self.setGraphVal('memcached_statevict', 'evict', stats.get('evictions'))
            if self.graphHasField('memcached_statevict', 'reclaim'):
                self.setGraphVal('memcached_statevict', 'reclaim', stats.get('reclaimed'))
        if self.hasGraph('memcached_statauth'):
            self.setGraphVal('memcached_statauth', 'reqs', stats.get('auth_cmds'))
            self.setGraphVal('memcached_statauth', 'errors', stats.get('auth_errors'))
        if self.hasGraph('memcached_hitpct'):
            prev_stats = self._prev_stats
            for (field_name, field_hits, field_misses) in (
                    ('set', 'set_hits', 'set_misses'),
                    ('get', 'get_hits', 'get_misses'),
                    ('del', 'delete_hits', 'delete_misses'),
                    ('cas', 'cas_hits', 'cas_misses'),
                    ('incr', 'incr_hits', 'incr_misses'),
                    ('decr', 'decr_hits', 'decr_misses')):
                if prev_stats:
                    if (stats.has_key(field_hits)
                            and prev_stats.has_key(field_hits)
                            and stats.has_key(field_misses)
                            and prev_stats.has_key(field_misses)):
                        hits = stats[field_hits] - prev_stats[field_hits]
                        misses = stats[field_misses] - prev_stats[field_misses]
                        total = hits + misses
                        if total > 0:
                            val = 100.0 * hits / total
                        else:
                            val = 0
                        self.setGraphVal('memcached_hitpct', field_name,
                                         round(val, 2))
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "_stats", "is", "None", ":", "serverInfo", "=", "MemcachedInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_socket_file", ")", "stats", "=", "serverInfo", ".", "getStats", "(", ")", "else", ":", "stats", "=", "self", ".", "_stats", "if", "stats", "is", "None", ":", "raise", "Exception", "(", "\"Undetermined error accesing stats.\"", ")", "stats", "[", "'set_hits'", "]", "=", "stats", ".", "get", "(", "'total_items'", ")", "if", "stats", ".", "has_key", "(", "'cmd_set'", ")", "and", "stats", ".", "has_key", "(", "'total_items'", ")", ":", "stats", "[", "'set_misses'", "]", "=", "stats", "[", "'cmd_set'", "]", "-", "stats", "[", "'total_items'", "]", "self", ".", "saveState", "(", "stats", ")", "if", "self", ".", "hasGraph", "(", "'memcached_connections'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_connections'", ",", "'conn'", ",", "stats", ".", "get", "(", "'curr_connections'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_items'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_items'", ",", "'items'", ",", "stats", ".", "get", "(", "'curr_items'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_memory'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_memory'", ",", "'bytes'", ",", "stats", ".", "get", "(", "'bytes'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_connrate'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_connrate'", ",", "'conn'", ",", "stats", ".", "get", "(", "'total_connections'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_traffic'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_traffic'", ",", "'rxbytes'", ",", "stats", ".", "get", "(", "'bytes_read'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_traffic'", ",", "'txbytes'", ",", "stats", ".", "get", "(", "'bytes_written'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_reqrate'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'set'", ",", "stats", ".", "get", "(", "'cmd_set'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'get'", ",", "stats", ".", "get", "(", "'cmd_get'", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'del'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'del'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'delete_hits'", ")", ",", "stats", ".", "get", "(", "'delete_misses'", ")", "]", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'cas'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'cas'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'cas_hits'", ")", ",", "stats", ".", "get", "(", "'cas_misses'", ")", ",", "stats", ".", "get", "(", "'cas_badval'", ")", "]", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'incr'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'incr'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'incr_hits'", ")", ",", "stats", ".", "get", "(", "'incr_misses'", ")", "]", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_reqrate'", ",", "'decr'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_reqrate'", ",", "'decr'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'decr_hits'", ")", ",", "stats", ".", "get", "(", "'decr_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", 
"'memcached_statget'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statget'", ",", "'hit'", ",", "stats", ".", "get", "(", "'get_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statget'", ",", "'miss'", ",", "stats", ".", "get", "(", "'get_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statget'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'get_hits'", ")", ",", "stats", ".", "get", "(", "'get_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statset'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statset'", ",", "'hit'", ",", "stats", ".", "get", "(", "'set_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statset'", ",", "'miss'", ",", "stats", ".", "get", "(", "'set_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statset'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'set_hits'", ")", ",", "stats", ".", "get", "(", "'set_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statdel'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statdel'", ",", "'hit'", ",", "stats", ".", "get", "(", "'delete_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statdel'", ",", "'miss'", ",", "stats", ".", "get", "(", "'delete_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statdel'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'delete_hits'", ")", ",", "stats", ".", "get", "(", "'delete_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statcas'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'hit'", ",", "stats", ".", "get", "(", "'cas_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'miss'", ",", "stats", ".", "get", "(", "'cas_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'badval'", ",", "stats", ".", "get", "(", "'cas_badval'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statcas'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'cas_hits'", ")", ",", "stats", ".", "get", "(", "'cas_misses'", ")", ",", "stats", ".", "get", "(", "'cas_badval'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statincrdecr'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'incr_hit'", ",", "stats", ".", "get", "(", "'incr_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'decr_hit'", ",", "stats", ".", "get", "(", "'decr_hits'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'incr_miss'", ",", "stats", ".", "get", "(", "'incr_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'decr_miss'", ",", "stats", ".", "get", "(", "'decr_misses'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statincrdecr'", ",", "'total'", ",", "safe_sum", "(", "[", "stats", ".", "get", "(", "'incr_hits'", ")", ",", "stats", ".", "get", "(", "'decr_hits'", ")", ",", "stats", ".", "get", "(", "'incr_misses'", ")", ",", "stats", ".", "get", "(", "'decr_misses'", ")", "]", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statevict'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statevict'", ",", "'evict'", ",", "stats", ".", "get", "(", "'evictions'", ")", ")", "if", "self", ".", "graphHasField", "(", "'memcached_statevict'", ",", "'reclaim'", ")", ":", "self", ".", 
"setGraphVal", "(", "'memcached_statevict'", ",", "'reclaim'", ",", "stats", ".", "get", "(", "'reclaimed'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_statauth'", ")", ":", "self", ".", "setGraphVal", "(", "'memcached_statauth'", ",", "'reqs'", ",", "stats", ".", "get", "(", "'auth_cmds'", ")", ")", "self", ".", "setGraphVal", "(", "'memcached_statauth'", ",", "'errors'", ",", "stats", ".", "get", "(", "'auth_errors'", ")", ")", "if", "self", ".", "hasGraph", "(", "'memcached_hitpct'", ")", ":", "prev_stats", "=", "self", ".", "_prev_stats", "for", "(", "field_name", ",", "field_hits", ",", "field_misses", ")", "in", "(", "(", "'set'", ",", "'set_hits'", ",", "'set_misses'", ")", ",", "(", "'get'", ",", "'get_hits'", ",", "'get_misses'", ")", ",", "(", "'del'", ",", "'delete_hits'", ",", "'delete_misses'", ")", ",", "(", "'cas'", ",", "'cas_hits'", ",", "'cas_misses'", ")", ",", "(", "'incr'", ",", "'incr_hits'", ",", "'incr_misses'", ")", ",", "(", "'decr'", ",", "'decr_hits'", ",", "'decr_misses'", ")", ")", ":", "if", "prev_stats", ":", "if", "(", "stats", ".", "has_key", "(", "field_hits", ")", "and", "prev_stats", ".", "has_key", "(", "field_hits", ")", "and", "stats", ".", "has_key", "(", "field_misses", ")", "and", "prev_stats", ".", "has_key", "(", "field_misses", ")", ")", ":", "hits", "=", "stats", "[", "field_hits", "]", "-", "prev_stats", "[", "field_hits", "]", "misses", "=", "stats", "[", "field_misses", "]", "-", "prev_stats", "[", "field_misses", "]", "total", "=", "hits", "+", "misses", "if", "total", ">", "0", ":", "val", "=", "100.0", "*", "hits", "/", "total", "else", ":", "val", "=", "0", "self", ".", "setGraphVal", "(", "'memcached_hitpct'", ",", "field_name", ",", "round", "(", "val", ",", "2", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/memcachedstats.py#L305-L440
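The memcached_hitpct graph derives a percentage from two successive counter snapshots rather than from raw totals. A reduced sketch of that calculation (the helper name and sample numbers are illustrative, not part of the plugin API):

    def hit_pct(curr, prev, hits_key, misses_key):
        hits = curr[hits_key] - prev[hits_key]
        misses = curr[misses_key] - prev[misses_key]
        total = hits + misses
        return round(100.0 * hits / total, 2) if total > 0 else 0.0

    print(hit_pct({'get_hits': 150, 'get_misses': 50},
                  {'get_hits': 100, 'get_misses': 30},
                  'get_hits', 'get_misses'))  # 71.43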
aouyar/PyMunin
pymunin/plugins/memcachedstats.py
MuninMemcachedPlugin.autoconf
def autoconf(self):
        """Implements Munin Plugin Auto-Configuration Option.
        
        @return: True if plugin can be auto-configured, False otherwise.
        
        """
        serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
        return (serverInfo is not None)
python
def autoconf(self):
        """Implements Munin Plugin Auto-Configuration Option.
        
        @return: True if plugin can be auto-configured, False otherwise.
        
        """
        serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
        return (serverInfo is not None)
[ "def", "autoconf", "(", "self", ")", ":", "serverInfo", "=", "MemcachedInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_socket_file", ")", "return", "(", "serverInfo", "is", "not", "None", ")" ]
Implements Munin Plugin Auto-Configuration Option.

@return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/memcachedstats.py#L442-L449
ContextLab/quail
quail/distance.py
correlation
def correlation(a, b):
    "Returns correlation distance between a and b"
    if isinstance(a, list):
        a = np.array(a)
    if isinstance(b, list):
        b = np.array(b)
    a = a.reshape(1, -1)
    b = b.reshape(1, -1)
    return cdist(a, b, 'correlation')
python
def correlation(a, b):
    "Returns correlation distance between a and b"
    if isinstance(a, list):
        a = np.array(a)
    if isinstance(b, list):
        b = np.array(b)
    a = a.reshape(1, -1)
    b = b.reshape(1, -1)
    return cdist(a, b, 'correlation')
[ "def", "correlation", "(", "a", ",", "b", ")", ":", "if", "isinstance", "(", "a", ",", "list", ")", ":", "a", "=", "np", ".", "array", "(", "a", ")", "if", "isinstance", "(", "b", ",", "list", ")", ":", "b", "=", "np", ".", "array", "(", "b", ")", "a", "=", "a", ".", "reshape", "(", "1", ",", "-", "1", ")", "b", "=", "b", ".", "reshape", "(", "1", ",", "-", "1", ")", "return", "cdist", "(", "a", ",", "b", ",", "'correlation'", ")" ]
Returns correlation distance between a and b
[ "Returns", "correlation", "distance", "between", "a", "and", "b" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/distance.py#L9-L17
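Usage sketch (import path inferred from the record's file path; note the underlying scipy cdist call returns a 1x1 array, not a scalar):

    from quail.distance import correlation

    a = [1.0, 2.0, 3.0]
    b = [2.0, 4.0, 6.0]
    print(correlation(a, b))  # ~[[0.0]]: perfectly correlated vectors have zero correlation distance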
ContextLab/quail
quail/distance.py
euclidean
def euclidean(a, b):
    "Returns euclidean distance between a and b"
    return np.linalg.norm(np.subtract(a, b))
python
def euclidean(a, b):
    "Returns euclidean distance between a and b"
    return np.linalg.norm(np.subtract(a, b))
[ "def", "euclidean", "(", "a", ",", "b", ")", ":", "return", "np", ".", "linalg", ".", "norm", "(", "np", ".", "subtract", "(", "a", ",", "b", ")", ")" ]
Returns euclidean distance between a and b
[ "Returns", "euclidean", "distance", "between", "a", "and", "b" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/distance.py#L19-L21
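Usage sketch (unlike correlation above, this one returns a plain scalar):

    from quail.distance import euclidean

    print(euclidean([0.0, 0.0], [3.0, 4.0]))  # 5.0, the L2 norm of the difference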
aouyar/PyMunin
pysysinfo/process.py
ProcessInfo.execProcCmd
def execProcCmd(self, *args):
        """Execute ps command with positional params args and return result as
        list of lines.
        
        @param *args: Positional params for ps command.
        @return:      List of output lines
        
        """
        out = util.exec_command([psCmd,] + list(args))
        return out.splitlines()
python
def execProcCmd(self, *args):
        """Execute ps command with positional params args and return result as
        list of lines.
        
        @param *args: Positional params for ps command.
        @return:      List of output lines
        
        """
        out = util.exec_command([psCmd,] + list(args))
        return out.splitlines()
[ "def", "execProcCmd", "(", "self", ",", "*", "args", ")", ":", "out", "=", "util", ".", "exec_command", "(", "[", "psCmd", ",", "]", "+", "list", "(", "args", ")", ")", "return", "out", ".", "splitlines", "(", ")" ]
Execute ps command with positional params args and return result as
list of lines.

@param *args: Positional params for ps command.
@return:      List of output lines
[ "Execute", "ps", "command", "with", "positional", "params", "args", "and", "return", "result", "as", "list", "of", "lines", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/process.py#L47-L56
aouyar/PyMunin
pysysinfo/process.py
ProcessInfo.parseProcCmd
def parseProcCmd(self, fields=('pid', 'user', 'cmd',), threads=False):
        """Execute ps command with custom output format with columns from
        fields and return result as a nested list.
        
        The Standard Format Specifiers from ps man page must be used for the
        fields parameter.
        
        @param fields:  List of fields included in the output.
                        Default: pid, user, cmd
        @param threads: If True, include threads in output.
        @return:        List of headers and list of rows and columns.
        
        """
        args = []
        headers = [f.lower() for f in fields]
        args.append('--no-headers')
        args.append('-e')
        if threads:
            args.append('-T')
        field_ranges = []
        fmt_strs = []
        start = 0
        for header in headers:
            field_width = psFieldWidth.get(header, psDefaultFieldWidth)
            fmt_strs.append('%s:%d' % (header, field_width))
            end = start + field_width + 1
            field_ranges.append((start, end))
            start = end
        args.append('-o')
        args.append(','.join(fmt_strs))
        lines = self.execProcCmd(*args)
        if len(lines) > 0:
            stats = []
            for line in lines:
                cols = []
                for (start, end) in field_ranges:
                    cols.append(line[start:end].strip())
                stats.append(cols)
            return {'headers': headers, 'stats': stats}
        else:
            return None
python
def parseProcCmd(self, fields=('pid', 'user', 'cmd',), threads=False):
        """Execute ps command with custom output format with columns from
        fields and return result as a nested list.
        
        The Standard Format Specifiers from ps man page must be used for the
        fields parameter.
        
        @param fields:  List of fields included in the output.
                        Default: pid, user, cmd
        @param threads: If True, include threads in output.
        @return:        List of headers and list of rows and columns.
        
        """
        args = []
        headers = [f.lower() for f in fields]
        args.append('--no-headers')
        args.append('-e')
        if threads:
            args.append('-T')
        field_ranges = []
        fmt_strs = []
        start = 0
        for header in headers:
            field_width = psFieldWidth.get(header, psDefaultFieldWidth)
            fmt_strs.append('%s:%d' % (header, field_width))
            end = start + field_width + 1
            field_ranges.append((start, end))
            start = end
        args.append('-o')
        args.append(','.join(fmt_strs))
        lines = self.execProcCmd(*args)
        if len(lines) > 0:
            stats = []
            for line in lines:
                cols = []
                for (start, end) in field_ranges:
                    cols.append(line[start:end].strip())
                stats.append(cols)
            return {'headers': headers, 'stats': stats}
        else:
            return None
[ "def", "parseProcCmd", "(", "self", ",", "fields", "=", "(", "'pid'", ",", "'user'", ",", "'cmd'", ",", ")", ",", "threads", "=", "False", ")", ":", "args", "=", "[", "]", "headers", "=", "[", "f", ".", "lower", "(", ")", "for", "f", "in", "fields", "]", "args", ".", "append", "(", "'--no-headers'", ")", "args", ".", "append", "(", "'-e'", ")", "if", "threads", ":", "args", ".", "append", "(", "'-T'", ")", "field_ranges", "=", "[", "]", "fmt_strs", "=", "[", "]", "start", "=", "0", "for", "header", "in", "headers", ":", "field_width", "=", "psFieldWidth", ".", "get", "(", "header", ",", "psDefaultFieldWidth", ")", "fmt_strs", ".", "append", "(", "'%s:%d'", "%", "(", "header", ",", "field_width", ")", ")", "end", "=", "start", "+", "field_width", "+", "1", "field_ranges", ".", "append", "(", "(", "start", ",", "end", ")", ")", "start", "=", "end", "args", ".", "append", "(", "'-o'", ")", "args", ".", "append", "(", "','", ".", "join", "(", "fmt_strs", ")", ")", "lines", "=", "self", ".", "execProcCmd", "(", "*", "args", ")", "if", "len", "(", "lines", ")", ">", "0", ":", "stats", "=", "[", "]", "for", "line", "in", "lines", ":", "cols", "=", "[", "]", "for", "(", "start", ",", "end", ")", "in", "field_ranges", ":", "cols", ".", "append", "(", "line", "[", "start", ":", "end", "]", ".", "strip", "(", ")", ")", "stats", ".", "append", "(", "cols", ")", "return", "{", "'headers'", ":", "headers", ",", "'stats'", ":", "stats", "}", "else", ":", "return", "None" ]
Execute ps command with custom output format with columns from fields
and return result as a nested list.

The Standard Format Specifiers from ps man page must be used for the
fields parameter.

@param fields:  List of fields included in the output.
                Default: pid, user, cmd
@param threads: If True, include threads in output.
@return:        List of headers and list of rows and columns.
[ "Execute", "ps", "command", "with", "custom", "output", "format", "with", "columns", "from", "fields", "and", "return", "result", "as", "a", "nested", "list", ".", "The", "Standard", "Format", "Specifiers", "from", "ps", "man", "page", "must", "be", "used", "for", "the", "fields", "parameter", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/process.py#L58-L98
aouyar/PyMunin
pysysinfo/process.py
ProcessInfo.getProcList
def getProcList(self, fields=('pid', 'user', 'cmd',), threads=False, **kwargs):
        """Execute ps command with custom output format with columns from
        fields, select lines using the filters defined by kwargs and return
        result as a nested list.
        
        The Standard Format Specifiers from ps man page must be used for the
        fields parameter.
        
        @param fields:   Fields included in the output.
                         Default: pid, user, cmd
        @param threads:  If True, include threads in output.
        @param **kwargs: Keyword variables are used for filtering the results
                         depending on the values of the columns. Each keyword
                         must correspond to a field name with an optional
                         suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        @return:         List of headers and list of rows and columns.
        
        """
        field_list = list(fields)
        for key in kwargs:
            col = re.sub('(_ic)?(_regex)?$', '', key)
            if not col in field_list:
                field_list.append(col)
        pinfo = self.parseProcCmd(field_list, threads)
        if pinfo:
            if len(kwargs) > 0:
                pfilter = util.TableFilter()
                pfilter.registerFilters(**kwargs)
                stats = pfilter.applyFilters(pinfo['headers'], pinfo['stats'])
                return {'headers': pinfo['headers'], 'stats': stats}
            else:
                return pinfo
        else:
            return None
python
def getProcList(self, fields=('pid', 'user', 'cmd',), threads=False, **kwargs):
        """Execute ps command with custom output format with columns from
        fields, select lines using the filters defined by kwargs and return
        result as a nested list.
        
        The Standard Format Specifiers from ps man page must be used for the
        fields parameter.
        
        @param fields:   Fields included in the output.
                         Default: pid, user, cmd
        @param threads:  If True, include threads in output.
        @param **kwargs: Keyword variables are used for filtering the results
                         depending on the values of the columns. Each keyword
                         must correspond to a field name with an optional
                         suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        @return:         List of headers and list of rows and columns.
        
        """
        field_list = list(fields)
        for key in kwargs:
            col = re.sub('(_ic)?(_regex)?$', '', key)
            if not col in field_list:
                field_list.append(col)
        pinfo = self.parseProcCmd(field_list, threads)
        if pinfo:
            if len(kwargs) > 0:
                pfilter = util.TableFilter()
                pfilter.registerFilters(**kwargs)
                stats = pfilter.applyFilters(pinfo['headers'], pinfo['stats'])
                return {'headers': pinfo['headers'], 'stats': stats}
            else:
                return pinfo
        else:
            return None
[ "def", "getProcList", "(", "self", ",", "fields", "=", "(", "'pid'", ",", "'user'", ",", "'cmd'", ",", ")", ",", "threads", "=", "False", ",", "*", "*", "kwargs", ")", ":", "field_list", "=", "list", "(", "fields", ")", "for", "key", "in", "kwargs", ":", "col", "=", "re", ".", "sub", "(", "'(_ic)?(_regex)?$'", ",", "''", ",", "key", ")", "if", "not", "col", "in", "field_list", ":", "field_list", ".", "append", "(", "col", ")", "pinfo", "=", "self", ".", "parseProcCmd", "(", "field_list", ",", "threads", ")", "if", "pinfo", ":", "if", "len", "(", "kwargs", ")", ">", "0", ":", "pfilter", "=", "util", ".", "TableFilter", "(", ")", "pfilter", ".", "registerFilters", "(", "*", "*", "kwargs", ")", "stats", "=", "pfilter", ".", "applyFilters", "(", "pinfo", "[", "'headers'", "]", ",", "pinfo", "[", "'stats'", "]", ")", "return", "{", "'headers'", ":", "pinfo", "[", "'headers'", "]", ",", "'stats'", ":", "stats", "}", "else", ":", "return", "pinfo", "else", ":", "return", "None" ]
Execute ps command with custom output format with columns from fields,
select lines using the filters defined by kwargs and return result as
a nested list.

The Standard Format Specifiers from ps man page must be used for the
fields parameter.

@param fields:   Fields included in the output.
                 Default: pid, user, cmd
@param threads:  If True, include threads in output.
@param **kwargs: Keyword variables are used for filtering the results
                 depending on the values of the columns. Each keyword
                 must correspond to a field name with an optional suffix:
                 field:          Field equal to value or in list of values.
                 field_ic:       Field equal to value or in list of values,
                                 using case insensitive comparison.
                 field_regex:    Field matches regex value or matches with
                                 any regex in list of values.
                 field_ic_regex: Field matches regex value or matches with
                                 any regex in list of values using case
                                 insensitive match.
@return:         List of headers and list of rows and columns.
[ "Execute", "ps", "command", "with", "custom", "output", "format", "with", "columns", "columns", "from", "fields", "select", "lines", "using", "the", "filters", "defined", "by", "kwargs", "and", "return", "result", "as", "a", "nested", "list", ".", "The", "Standard", "Format", "Specifiers", "from", "ps", "man", "page", "must", "be", "used", "for", "the", "fields", "parameter", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/process.py#L100-L144
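A sketch of the keyword-filter convention the docstring describes (the filter values and regexes here are illustrative; import path inferred from the record's file path):

    from pysysinfo.process import ProcessInfo

    procs = ProcessInfo().getProcList(fields=('pid', 'user', 'cmd'),
                                      user='root',             # exact match on the user column
                                      cmd_regex='^/usr/sbin')  # regex match on the cmd column
    if procs:
        for row in procs['stats']:
            print(row)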
aouyar/PyMunin
pysysinfo/process.py
ProcessInfo.getProcDict
def getProcDict(self, fields=('user', 'cmd',), threads=False, **kwargs):
        """Execute ps command with custom output format with columns from
        fields, and return result as a nested dictionary with the key PID or
        SPID.
        
        The Standard Format Specifiers from ps man page must be used for the
        fields parameter.
        
        @param fields:   Fields included in the output.
                         Default: user, cmd
                         (PID or SPID column is included by default.)
        @param threads:  If True, include threads in output.
        @param **kwargs: Keyword variables are used for filtering the results
                         depending on the values of the columns. Each keyword
                         must correspond to a field name with an optional
                         suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        @return:         Nested dictionary indexed by:
                             PID for process info.
                             SPID for thread info.
        
        """
        stats = {}
        field_list = list(fields)
        num_cols = len(field_list)
        if threads:
            key = 'spid'
        else:
            key = 'pid'
        try:
            key_idx = field_list.index(key)
        except ValueError:
            field_list.append(key)
            key_idx = len(field_list) - 1
        result = self.getProcList(field_list, threads, **kwargs)
        if result is not None:
            headers = result['headers'][:num_cols]
            lines = result['stats']
            if len(lines) > 1:
                for cols in lines:
                    stats[cols[key_idx]] = dict(zip(headers, cols[:num_cols]))
            return stats
        else:
            return None
python
def getProcDict(self, fields=('user', 'cmd',), threads=False, **kwargs):
        """Execute ps command with custom output format with columns from
        fields, and return result as a nested dictionary with the key PID or
        SPID.
        
        The Standard Format Specifiers from ps man page must be used for the
        fields parameter.
        
        @param fields:   Fields included in the output.
                         Default: user, cmd
                         (PID or SPID column is included by default.)
        @param threads:  If True, include threads in output.
        @param **kwargs: Keyword variables are used for filtering the results
                         depending on the values of the columns. Each keyword
                         must correspond to a field name with an optional
                         suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        @return:         Nested dictionary indexed by:
                             PID for process info.
                             SPID for thread info.
        
        """
        stats = {}
        field_list = list(fields)
        num_cols = len(field_list)
        if threads:
            key = 'spid'
        else:
            key = 'pid'
        try:
            key_idx = field_list.index(key)
        except ValueError:
            field_list.append(key)
            key_idx = len(field_list) - 1
        result = self.getProcList(field_list, threads, **kwargs)
        if result is not None:
            headers = result['headers'][:num_cols]
            lines = result['stats']
            if len(lines) > 1:
                for cols in lines:
                    stats[cols[key_idx]] = dict(zip(headers, cols[:num_cols]))
            return stats
        else:
            return None
[ "def", "getProcDict", "(", "self", ",", "fields", "=", "(", "'user'", ",", "'cmd'", ",", ")", ",", "threads", "=", "False", ",", "*", "*", "kwargs", ")", ":", "stats", "=", "{", "}", "field_list", "=", "list", "(", "fields", ")", "num_cols", "=", "len", "(", "field_list", ")", "if", "threads", ":", "key", "=", "'spid'", "else", ":", "key", "=", "'pid'", "try", ":", "key_idx", "=", "field_list", ".", "index", "(", "key", ")", "except", "ValueError", ":", "field_list", ".", "append", "(", "key", ")", "key_idx", "=", "len", "(", "field_list", ")", "-", "1", "result", "=", "self", ".", "getProcList", "(", "field_list", ",", "threads", ",", "*", "*", "kwargs", ")", "if", "result", "is", "not", "None", ":", "headers", "=", "result", "[", "'headers'", "]", "[", ":", "num_cols", "]", "lines", "=", "result", "[", "'stats'", "]", "if", "len", "(", "lines", ")", ">", "1", ":", "for", "cols", "in", "lines", ":", "stats", "[", "cols", "[", "key_idx", "]", "]", "=", "dict", "(", "zip", "(", "headers", ",", "cols", "[", ":", "num_cols", "]", ")", ")", "return", "stats", "else", ":", "return", "None" ]
Execute ps command with custom output format with columns from fields,
and return result as a nested dictionary with the key PID or SPID.

The Standard Format Specifiers from ps man page must be used for the
fields parameter.

@param fields:   Fields included in the output.
                 Default: user, cmd
                 (PID or SPID column is included by default.)
@param threads:  If True, include threads in output.
@param **kwargs: Keyword variables are used for filtering the results
                 depending on the values of the columns. Each keyword
                 must correspond to a field name with an optional suffix:
                 field:          Field equal to value or in list of values.
                 field_ic:       Field equal to value or in list of values,
                                 using case insensitive comparison.
                 field_regex:    Field matches regex value or matches with
                                 any regex in list of values.
                 field_ic_regex: Field matches regex value or matches with
                                 any regex in list of values using case
                                 insensitive match.
@return:         Nested dictionary indexed by:
                     PID for process info.
                     SPID for thread info.
[ "Execute", "ps", "command", "with", "custom", "output", "format", "with", "columns", "format", "with", "columns", "from", "fields", "and", "return", "result", "as", "a", "nested", "dictionary", "with", "the", "key", "PID", "or", "SPID", ".", "The", "Standard", "Format", "Specifiers", "from", "ps", "man", "page", "must", "be", "used", "for", "the", "fields", "parameter", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/process.py#L146-L198
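Usage sketch (the pid key column is appended automatically when absent from fields, per the code above):

    from pysysinfo.process import ProcessInfo

    procdict = ProcessInfo().getProcDict(fields=('user', 'cmd'))
    for pid, info in (procdict or {}).items():
        print('%s %s %s' % (pid, info['user'], info['cmd']))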
aouyar/PyMunin
pysysinfo/process.py
ProcessInfo.getProcStatStatus
def getProcStatStatus(self, threads=False, **kwargs):
        """Return process counts per status and priority.
        
        @param **kwargs: Keyword variables are used for filtering the results
                         depending on the values of the columns. Each keyword
                         must correspond to a field name with an optional
                         suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        @return:         Dictionary of process counters.
        
        """
        procs = self.getProcList(['stat',], threads=threads, **kwargs)
        status = dict(zip(procStatusNames.values(),
                          [0,] * len(procStatusNames)))
        prio = {'high': 0, 'low': 0, 'norm': 0, 'locked_in_mem': 0}
        total = 0
        locked_in_mem = 0
        if procs is not None:
            for cols in procs['stats']:
                col_stat = cols[0]
                status[procStatusNames[col_stat[0]]] += 1
                if '<' in col_stat[1:]:
                    prio['high'] += 1
                elif 'N' in col_stat[1:]:
                    prio['low'] += 1
                else:
                    prio['norm'] += 1
                if 'L' in col_stat[1:]:
                    locked_in_mem += 1
                total += 1
        return {'status': status, 'prio': prio,
                'locked_in_mem': locked_in_mem, 'total': total}
python
def getProcStatStatus(self, threads=False, **kwargs):
        """Return process counts per status and priority.
        
        @param **kwargs: Keyword variables are used for filtering the results
                         depending on the values of the columns. Each keyword
                         must correspond to a field name with an optional
                         suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        @return: Dictionary of process counters.
        
        """
        procs = self.getProcList(['stat',], threads=threads, **kwargs)
        status = dict(zip(procStatusNames.values(),
                          [0,] * len(procStatusNames)))
        prio = {'high': 0, 'low': 0, 'norm': 0, 'locked_in_mem': 0}
        total = 0
        locked_in_mem = 0
        if procs is not None:
            for cols in procs['stats']:
                col_stat = cols[0]
                status[procStatusNames[col_stat[0]]] += 1
                if '<' in col_stat[1:]:
                    prio['high'] += 1
                elif 'N' in col_stat[1:]:
                    prio['low'] += 1
                else:
                    prio['norm'] += 1
                if 'L' in col_stat[1:]:
                    locked_in_mem += 1
                total += 1
        return {'status': status,
                'prio': prio,
                'locked_in_mem': locked_in_mem,
                'total': total}
[ "def", "getProcStatStatus", "(", "self", ",", "threads", "=", "False", ",", "*", "*", "kwargs", ")", ":", "procs", "=", "self", ".", "getProcList", "(", "[", "'stat'", ",", "]", ",", "threads", "=", "threads", ",", "*", "*", "kwargs", ")", "status", "=", "dict", "(", "zip", "(", "procStatusNames", ".", "values", "(", ")", ",", "[", "0", ",", "]", "*", "len", "(", "procStatusNames", ")", ")", ")", "prio", "=", "{", "'high'", ":", "0", ",", "'low'", ":", "0", ",", "'norm'", ":", "0", ",", "'locked_in_mem'", ":", "0", "}", "total", "=", "0", "locked_in_mem", "=", "0", "if", "procs", "is", "not", "None", ":", "for", "cols", "in", "procs", "[", "'stats'", "]", ":", "col_stat", "=", "cols", "[", "0", "]", "status", "[", "procStatusNames", "[", "col_stat", "[", "0", "]", "]", "]", "+=", "1", "if", "'<'", "in", "col_stat", "[", "1", ":", "]", ":", "prio", "[", "'high'", "]", "+=", "1", "elif", "'N'", "in", "col_stat", "[", "1", ":", "]", ":", "prio", "[", "'low'", "]", "+=", "1", "else", ":", "prio", "[", "'norm'", "]", "+=", "1", "if", "'L'", "in", "col_stat", "[", "1", ":", "]", ":", "locked_in_mem", "+=", "1", "total", "+=", "1", "return", "{", "'status'", ":", "status", ",", "'prio'", ":", "prio", ",", "'locked_in_mem'", ":", "locked_in_mem", ",", "'total'", ":", "total", "}" ]
Return process counts per status and priority.

@param **kwargs: Keyword variables are used for filtering the results
                 depending on the values of the columns. Each keyword
                 must correspond to a field name with an optional
                 suffix:
                 field:          Field equal to value or in list of
                                 values.
                 field_ic:       Field equal to value or in list of
                                 values, using case insensitive
                                 comparison.
                 field_regex:    Field matches regex value or matches
                                 with any regex in list of values.
                 field_ic_regex: Field matches regex value or matches
                                 with any regex in list of values
                                 using case insensitive match.
@return: Dictionary of process counters.
[ "Return", "process", "counts", "per", "status", "and", "priority", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/process.py#L200-L242
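A short sketch of getProcStatStatus from the entry above; the keys of the status sub-dict come from the module-level procStatusNames mapping, which is not shown in this record:

from pysysinfo.process import ProcessInfo

proc_info = ProcessInfo()
stats = proc_info.getProcStatStatus(threads=False)
print(stats['total'])           # total number of processes counted
print(stats['prio'])            # counts for 'high', 'low', 'norm' priorities
print(stats['status'])          # counts keyed by names from procStatusNames
print(stats['locked_in_mem'])   # processes with pages locked in memory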
aouyar/PyMunin
pymunin/plugins/ntpstats.py
MuninNTPstatsPlugin.retrieveVals
def retrieveVals(self):
        """Retrieve values for graphs."""
        ntpinfo = NTPinfo()
        stats = ntpinfo.getPeerStats()
        if stats:
            if self.hasGraph('ntp_peer_stratum'):
                self.setGraphVal('ntp_peer_stratum', 'stratum',
                                 stats.get('stratum'))
            if self.hasGraph('ntp_peer_stats'):
                self.setGraphVal('ntp_peer_stats', 'offset',
                                 stats.get('offset'))
                self.setGraphVal('ntp_peer_stats', 'delay',
                                 stats.get('delay'))
                self.setGraphVal('ntp_peer_stats', 'jitter',
                                 stats.get('jitter'))
python
def retrieveVals(self):
        """Retrieve values for graphs."""
        ntpinfo = NTPinfo()
        stats = ntpinfo.getPeerStats()
        if stats:
            if self.hasGraph('ntp_peer_stratum'):
                self.setGraphVal('ntp_peer_stratum', 'stratum',
                                 stats.get('stratum'))
            if self.hasGraph('ntp_peer_stats'):
                self.setGraphVal('ntp_peer_stats', 'offset',
                                 stats.get('offset'))
                self.setGraphVal('ntp_peer_stats', 'delay',
                                 stats.get('delay'))
                self.setGraphVal('ntp_peer_stats', 'jitter',
                                 stats.get('jitter'))
[ "def", "retrieveVals", "(", "self", ")", ":", "ntpinfo", "=", "NTPinfo", "(", ")", "stats", "=", "ntpinfo", ".", "getPeerStats", "(", ")", "if", "stats", ":", "if", "self", ".", "hasGraph", "(", "'ntp_peer_stratum'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_peer_stratum'", ",", "'stratum'", ",", "stats", ".", "get", "(", "'stratum'", ")", ")", "if", "self", ".", "hasGraph", "(", "'ntp_peer_stats'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_peer_stats'", ",", "'offset'", ",", "stats", ".", "get", "(", "'offset'", ")", ")", "self", ".", "setGraphVal", "(", "'ntp_peer_stats'", ",", "'delay'", ",", "stats", ".", "get", "(", "'delay'", ")", ")", "self", ".", "setGraphVal", "(", "'ntp_peer_stats'", ",", "'jitter'", ",", "stats", ".", "get", "(", "'jitter'", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/ntpstats.py#L84-L98
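The retrieveVals method above is a thin wrapper around NTPinfo; a direct sketch, assuming a local NTP daemon is running (the import path is an assumption based on the repository layout):

from pysysinfo.ntp import NTPinfo  # assumed module path

stats = NTPinfo().getPeerStats()
if stats:
    print(stats.get('stratum'))   # stratum of the sync peer
    print(stats.get('offset'))    # offset to the sync peer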
aouyar/PyMunin
pymunin/__init__.py
muninMain
def muninMain(pluginClass, argv=None, env=None, debug=False):
    """Main Block for Munin Plugins.
    
    @param pluginClass: Child class of MuninPlugin that implements plugin.
    @param argv:        List of command line arguments to Munin Plugin.
    @param env:         Dictionary of environment variables passed to
                        Munin Plugin.
    @param debug:       Print debugging messages if True. (Default: False)
    
    """
    if argv is None:
        argv = sys.argv
    if env is None:
        env = os.environ
    debug = debug or env.has_key('MUNIN_DEBUG')
    if len(argv) > 1 and argv[1] == 'autoconf':
        autoconf = True
    else:
        autoconf = False
    try:
        plugin = pluginClass(argv, env, debug)
        ret = plugin.run()
        if ret:
            return 0
        else:
            return 1
    except Exception:
        print >> sys.stderr, "ERROR: %s" % repr(sys.exc_info()[1])
        if autoconf:
            print "no"
        if debug:
            raise
        else:
            if autoconf:
                return 0
            else:
                return 1
python
def muninMain(pluginClass, argv=None, env=None, debug=False):
    """Main Block for Munin Plugins.
    
    @param pluginClass: Child class of MuninPlugin that implements plugin.
    @param argv:        List of command line arguments to Munin Plugin.
    @param env:         Dictionary of environment variables passed to
                        Munin Plugin.
    @param debug:       Print debugging messages if True. (Default: False)
    
    """
    if argv is None:
        argv = sys.argv
    if env is None:
        env = os.environ
    debug = debug or env.has_key('MUNIN_DEBUG')
    if len(argv) > 1 and argv[1] == 'autoconf':
        autoconf = True
    else:
        autoconf = False
    try:
        plugin = pluginClass(argv, env, debug)
        ret = plugin.run()
        if ret:
            return 0
        else:
            return 1
    except Exception:
        print >> sys.stderr, "ERROR: %s" % repr(sys.exc_info()[1])
        if autoconf:
            print "no"
        if debug:
            raise
        else:
            if autoconf:
                return 0
            else:
                return 1
[ "def", "muninMain", "(", "pluginClass", ",", "argv", "=", "None", ",", "env", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "debug", "=", "debug", "or", "env", ".", "has_key", "(", "'MUNIN_DEBUG'", ")", "if", "len", "(", "argv", ")", ">", "1", "and", "argv", "[", "1", "]", "==", "'autoconf'", ":", "autoconf", "=", "True", "else", ":", "autoconf", "=", "False", "try", ":", "plugin", "=", "pluginClass", "(", "argv", ",", "env", ",", "debug", ")", "ret", "=", "plugin", ".", "run", "(", ")", "if", "ret", ":", "return", "0", "else", ":", "return", "1", "except", "Exception", ":", "print", ">>", "sys", ".", "stderr", ",", "\"ERROR: %s\"", "%", "repr", "(", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "if", "autoconf", ":", "print", "\"no\"", "if", "debug", ":", "raise", "else", ":", "if", "autoconf", ":", "return", "0", "else", ":", "return", "1" ]
Main Block for Munin Plugins.

@param pluginClass: Child class of MuninPlugin that implements plugin.
@param argv:        List of command line arguments to Munin Plugin.
@param env:         Dictionary of environment variables passed to Munin Plugin.
@param debug:       Print debugging messages if True. (Default: False)
[ "Main", "Block", "for", "Munin", "Plugins", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L938-L973
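A sketch of the conventional entry point that hands control to muninMain; MuninFooPlugin is a hypothetical MuninPlugin subclass defined earlier in the same plugin module:

import sys
from pymunin import muninMain

if __name__ == "__main__":
    # muninMain returns 0 on success and 1 on failure.
    sys.exit(muninMain(MuninFooPlugin))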
aouyar/PyMunin
pymunin/__init__.py
fixLabel
def fixLabel(label, maxlen, delim=None, repl='', truncend=True):
    """Truncate long graph and field labels.
    
    @param label:    Label text.
    @param maxlen:   Maximum field label length in characters.
                     No maximum field label length is enforced by default.
    @param delim:    Delimiter for field labels; field labels longer than
                     maxlen will preferably be truncated at delimiter.
    @param repl:     Replacement string for truncated part.
    @param truncend: Truncate the end of label name if True. (Default)
                     The beginning part of label will be truncated if False.
    
    """
    if len(label) <= maxlen:
        return label
    else:
        maxlen -= len(repl)
        if delim is not None:
            if truncend:
                end = label.rfind(delim, 0, maxlen)
                if end > 0:
                    return label[:end+1] + repl
            else:
                start = label.find(delim, len(label) - maxlen)
                if start > 0:
                    return repl + label[start:]
        if truncend:
            return label[:maxlen] + repl
        else:
            return repl + label[-maxlen:]
python
def fixLabel(label, maxlen, delim=None, repl='', truncend=True):
    """Truncate long graph and field labels.
    
    @param label:    Label text.
    @param maxlen:   Maximum field label length in characters.
                     No maximum field label length is enforced by default.
    @param delim:    Delimiter for field labels; field labels longer than
                     maxlen will preferably be truncated at delimiter.
    @param repl:     Replacement string for truncated part.
    @param truncend: Truncate the end of label name if True. (Default)
                     The beginning part of label will be truncated if False.
    
    """
    if len(label) <= maxlen:
        return label
    else:
        maxlen -= len(repl)
        if delim is not None:
            if truncend:
                end = label.rfind(delim, 0, maxlen)
                if end > 0:
                    return label[:end+1] + repl
            else:
                start = label.find(delim, len(label) - maxlen)
                if start > 0:
                    return repl + label[start:]
        if truncend:
            return label[:maxlen] + repl
        else:
            return repl + label[-maxlen:]
[ "def", "fixLabel", "(", "label", ",", "maxlen", ",", "delim", "=", "None", ",", "repl", "=", "''", ",", "truncend", "=", "True", ")", ":", "if", "len", "(", "label", ")", "<=", "maxlen", ":", "return", "label", "else", ":", "maxlen", "-=", "len", "(", "repl", ")", "if", "delim", "is", "not", "None", ":", "if", "truncend", ":", "end", "=", "label", ".", "rfind", "(", "delim", ",", "0", ",", "maxlen", ")", "if", "end", ">", "0", ":", "return", "label", "[", ":", "end", "+", "1", "]", "+", "repl", "else", ":", "start", "=", "label", ".", "find", "(", "delim", ",", "len", "(", "label", ")", "-", "maxlen", ")", "if", "start", ">", "0", ":", "return", "repl", "+", "label", "[", "start", ":", "]", "if", "truncend", ":", "return", "label", "[", ":", "maxlen", "]", "+", "repl", "else", ":", "return", "repl", "+", "label", "[", "-", "maxlen", ":", "]" ]
Truncate long graph and field labels.

@param label:    Label text.
@param maxlen:   Maximum field label length in characters.
                 No maximum field label length is enforced by default.
@param delim:    Delimiter for field labels; field labels longer than
                 maxlen will preferably be truncated at delimiter.
@param repl:     Replacement string for truncated part.
@param truncend: Truncate the end of label name if True. (Default)
                 The beginning part of label will be truncated if False.
[ "Truncate", "long", "graph", "and", "field", "labels", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L975-L1004
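Worked examples for the truncation logic above; the return values are traced by hand from the code:

fixLabel('eth0_in_bytes', 13)                       # 'eth0_in_bytes' (fits, returned unchanged)
fixLabel('eth0_in_bytes', 8, repl='..')             # 'eth0_i..' (maxlen minus len(repl) leaves 6 chars)
fixLabel('eth0_in_bytes', 8, delim='_', repl='..')  # 'eth0_..' (cut at the last delimiter before position 6)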
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin._parseEnv
def _parseEnv(self, env=None):
        """Private method for parsing through environment variables.
        
        Parses for environment variables common to all Munin Plugins:
        - MUNIN_STATEFILE
        - MUNIN_CAP_DIRTY_CONFIG
        - nested_graphs
        
        @param env: Dictionary of environment variables.
                    (Only used for testing; initialized automatically by
                    constructor.)
        
        """
        if not env:
            env = self._env
        if env.has_key('MUNIN_STATEFILE'):
            self._stateFile = env.get('MUNIN_STATEFILE')
        else:
            self._stateFile = '/tmp/munin-state-%s' % self.plugin_name
        if env.has_key('MUNIN_CAP_DIRTY_CONFIG'):
            self._dirtyConfig = True
python
def _parseEnv(self, env=None):
        """Private method for parsing through environment variables.
        
        Parses for environment variables common to all Munin Plugins:
        - MUNIN_STATEFILE
        - MUNIN_CAP_DIRTY_CONFIG
        - nested_graphs
        
        @param env: Dictionary of environment variables.
                    (Only used for testing; initialized automatically by
                    constructor.)
        
        """
        if not env:
            env = self._env
        if env.has_key('MUNIN_STATEFILE'):
            self._stateFile = env.get('MUNIN_STATEFILE')
        else:
            self._stateFile = '/tmp/munin-state-%s' % self.plugin_name
        if env.has_key('MUNIN_CAP_DIRTY_CONFIG'):
            self._dirtyConfig = True
[ "def", "_parseEnv", "(", "self", ",", "env", "=", "None", ")", ":", "if", "not", "env", ":", "env", "=", "self", ".", "_env", "if", "env", ".", "has_key", "(", "'MUNIN_STATEFILE'", ")", ":", "self", ".", "_stateFile", "=", "env", ".", "get", "(", "'MUNIN_STATEFILE'", ")", "else", ":", "self", ".", "_stateFile", "=", "'/tmp/munin-state-%s'", "%", "self", ".", "plugin_name", "if", "env", ".", "has_key", "(", "'MUNIN_CAP_DIRTY_CONFIG'", ")", ":", "self", ".", "_dirtyConfig", "=", "True" ]
Private method for parsing through environment variables.

Parses for environment variables common to all Munin Plugins:
- MUNIN_STATEFILE
- MUNIN_CAP_DIRTY_CONFIG
- nested_graphs

@param env: Dictionary of environment variables.
            (Only used for testing; initialized automatically by
            constructor.)
[ "Private", "method", "for", "parsing", "through", "environment", "variables", ".", "Parses", "for", "environment", "variables", "common", "to", "all", "Munin", "Plugins", ":", "-", "MUNIN_STATEFILE", "-", "MUNIN_CAP_DIRTY_CONFIG", "-", "nested_graphs" ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L149-L169
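The effect of the parsed variables, sketched as comments; the plugin name 'diskusage' is a made-up example:

# MUNIN_STATEFILE=/var/lib/munin-node/state-foo -> self._stateFile == '/var/lib/munin-node/state-foo'
# MUNIN_STATEFILE unset                         -> self._stateFile == '/tmp/munin-state-diskusage'
# MUNIN_CAP_DIRTY_CONFIG set (any value)        -> self._dirtyConfig == True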
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin._getGraph
def _getGraph(self, graph_name, fail_noexist=False):
        """Private method for returning graph object with name graph_name.
        
        @param graph_name:   Graph Name
        @param fail_noexist: If true throw exception if there is no graph
                             with name graph_name.
        @return:             Graph Object or None
        
        """
        graph = self._graphDict.get(graph_name)
        if fail_noexist and graph is None:
            raise AttributeError("Invalid graph name: %s" % graph_name)
        else:
            return graph
python
def _getGraph(self, graph_name, fail_noexist=False):
        """Private method for returning graph object with name graph_name.
        
        @param graph_name:   Graph Name
        @param fail_noexist: If true throw exception if there is no graph
                             with name graph_name.
        @return:             Graph Object or None
        
        """
        graph = self._graphDict.get(graph_name)
        if fail_noexist and graph is None:
            raise AttributeError("Invalid graph name: %s" % graph_name)
        else:
            return graph
[ "def", "_getGraph", "(", "self", ",", "graph_name", ",", "fail_noexist", "=", "False", ")", ":", "graph", "=", "self", ".", "_graphDict", ".", "get", "(", "graph_name", ")", "if", "fail_noexist", "and", "graph", "is", "None", ":", "raise", "AttributeError", "(", "\"Invalid graph name: %s\"", "%", "graph_name", ")", "else", ":", "return", "graph" ]
Private method for returning graph object with name graph_name.

@param graph_name:   Graph Name
@param fail_noexist: If true throw exception if there is no graph
                     with name graph_name.
@return:             Graph Object or None
[ "Private", "method", "for", "returning", "graph", "object", "with", "name", "graph_name", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L171-L184
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin._getSubGraph
def _getSubGraph(self, parent_name, graph_name, fail_noexist=False):
        """Private method for returning subgraph object with name graph_name
        and parent graph with name parent_name.
        
        @param parent_name:  Root Graph Name
        @param graph_name:   Subgraph Name
        @param fail_noexist: If true throw exception if there is no subgraph
                             with name graph_name.
        @return:             Graph Object or None
        
        """
        if not self.isMultigraph:
            raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
        if self._graphDict.has_key(parent_name):
            subgraphs = self._subgraphDict.get(parent_name)
            if subgraphs is not None:
                subgraph = subgraphs.get(graph_name)
                if fail_noexist and subgraph is None:
                    raise AttributeError("Invalid subgraph name %s "
                                         "for graph %s."
                                         % (graph_name, parent_name))
                else:
                    return subgraph
            else:
                raise AttributeError("Parent graph %s has no subgraphs."
                                     % (parent_name,))
        else:
            raise AttributeError("Invalid parent graph name %s "
                                 "for subgraph %s."
                                 % (parent_name, graph_name))
python
def _getSubGraph(self, parent_name, graph_name, fail_noexist=False):
        """Private method for returning subgraph object with name graph_name
        and parent graph with name parent_name.
        
        @param parent_name:  Root Graph Name
        @param graph_name:   Subgraph Name
        @param fail_noexist: If true throw exception if there is no subgraph
                             with name graph_name.
        @return:             Graph Object or None
        
        """
        if not self.isMultigraph:
            raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
        if self._graphDict.has_key(parent_name):
            subgraphs = self._subgraphDict.get(parent_name)
            if subgraphs is not None:
                subgraph = subgraphs.get(graph_name)
                if fail_noexist and subgraph is None:
                    raise AttributeError("Invalid subgraph name %s "
                                         "for graph %s."
                                         % (graph_name, parent_name))
                else:
                    return subgraph
            else:
                raise AttributeError("Parent graph %s has no subgraphs."
                                     % (parent_name,))
        else:
            raise AttributeError("Invalid parent graph name %s "
                                 "for subgraph %s."
                                 % (parent_name, graph_name))
[ "def", "_getSubGraph", "(", "self", ",", "parent_name", ",", "graph_name", ",", "fail_noexist", "=", "False", ")", ":", "if", "not", "self", ".", "isMultigraph", ":", "raise", "AttributeError", "(", "\"Simple Munin Plugins cannot have subgraphs.\"", ")", "if", "self", ".", "_graphDict", ".", "has_key", "(", "parent_name", ")", "is", "not", "None", ":", "subgraphs", "=", "self", ".", "_subgraphDict", ".", "get", "(", "parent_name", ")", "if", "subgraphs", "is", "not", "None", ":", "subgraph", "=", "subgraphs", ".", "get", "(", "graph_name", ")", "if", "fail_noexist", "and", "subgraph", "is", "None", ":", "raise", "AttributeError", "(", "\"Invalid subgraph name %s\"", "\"for graph %s.\"", "%", "(", "graph_name", ",", "parent_name", ")", ")", "else", ":", "return", "subgraph", "else", ":", "raise", "AttributeError", "(", "\"Parent graph %s has no subgraphs.\"", "%", "(", "parent_name", ",", ")", ")", "else", ":", "raise", "AttributeError", "(", "\"Invalid parent graph name %s \"", "\"for subgraph %s.\"", "%", "(", "parent_name", ",", "graph_name", ")", ")" ]
Private method for returning subgraph object with name graph_name
and parent graph with name parent_name.

@param parent_name:  Root Graph Name
@param graph_name:   Subgraph Name
@param fail_noexist: If true throw exception if there is no subgraph
                     with name graph_name.
@return:             Graph Object or None
[ "Private", "method", "for", "returning", "subgraph", "object", "with", "name", "graph_name", "and", "parent", "graph", "with", "name", "parent_name", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L186-L214
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin._getMultigraphID
def _getMultigraphID(self, graph_name, subgraph_name=None):
        """Private method for generating Multigraph ID from graph name and
        subgraph name.
        
        @param graph_name:    Graph Name.
        @param subgraph_name: Subgraph Name.
        @return:              Multigraph ID.
        
        """
        if self.isMultiInstance and self._instanceName is not None:
            if subgraph_name is None:
                return "%s_%s" % (graph_name, self._instanceName)
            else:
                return "%s_%s.%s_%s" % (graph_name, self._instanceName,
                                        subgraph_name, self._instanceName)
        else:
            if subgraph_name is None:
                return graph_name
            else:
                return "%s.%s" % (graph_name, subgraph_name)
python
def _getMultigraphID(self, graph_name, subgraph_name=None):
        """Private method for generating Multigraph ID from graph name and
        subgraph name.
        
        @param graph_name:    Graph Name.
        @param subgraph_name: Subgraph Name.
        @return:              Multigraph ID.
        
        """
        if self.isMultiInstance and self._instanceName is not None:
            if subgraph_name is None:
                return "%s_%s" % (graph_name, self._instanceName)
            else:
                return "%s_%s.%s_%s" % (graph_name, self._instanceName,
                                        subgraph_name, self._instanceName)
        else:
            if subgraph_name is None:
                return graph_name
            else:
                return "%s.%s" % (graph_name, subgraph_name)
[ "def", "_getMultigraphID", "(", "self", ",", "graph_name", ",", "subgraph_name", "=", "None", ")", ":", "if", "self", ".", "isMultiInstance", "and", "self", ".", "_instanceName", "is", "not", "None", ":", "if", "subgraph_name", "is", "None", ":", "return", "\"%s_%s\"", "%", "(", "graph_name", ",", "self", ".", "_instanceName", ")", "else", ":", "return", "\"%s_%s.%s_%s\"", "%", "(", "graph_name", ",", "self", ".", "_instanceName", ",", "subgraph_name", ",", "self", ".", "_instanceName", ")", "else", ":", "if", "subgraph_name", "is", "None", ":", "return", "graph_name", "else", ":", "return", "\"%s.%s\"", "%", "(", "graph_name", ",", "subgraph_name", ")" ]
Private method for generating Multigraph ID from graph name and
subgraph name.

@param graph_name:    Graph Name.
@param subgraph_name: Subgraph Name.
@return:              Multigraph ID.
[ "Private", "method", "for", "generating", "Multigraph", "ID", "from", "graph", "name", "and", "subgraph", "name", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L216-L235
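Traced outputs for the ID scheme above, assuming a multi-instance plugin whose instance name is 'backup':

# multi-instance, instance name 'backup':
#   _getMultigraphID('diskusage')         -> 'diskusage_backup'
#   _getMultigraphID('diskusage', 'sda')  -> 'diskusage_backup.sda_backup'
# single instance (or no instance name):
#   _getMultigraphID('diskusage')         -> 'diskusage'
#   _getMultigraphID('diskusage', 'sda')  -> 'diskusage.sda'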
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin._formatConfig
def _formatConfig(self, conf_dict):
        """Formats configuration dictionary from Munin Graph and returns
        multi-line value entries for the plugin config cycle.
        
        @param conf_dict: Configuration dictionary.
        @return:          Multi-line text.
        
        """
        confs = []
        graph_dict = conf_dict['graph']
        field_list = conf_dict['fields']
        
        # Order and format Graph Attributes
        title = graph_dict.get('title')
        if title is not None:
            if self.isMultiInstance and self._instanceLabel is not None:
                if self._instanceLabelType == 'suffix':
                    confs.append("graph_%s %s - %s"
                                 % ('title', title, self._instanceLabel,))
                elif self._instanceLabelType == 'prefix':
                    confs.append("graph_%s %s - %s"
                                 % ('title', self._instanceLabel, title,))
            else:
                confs.append("graph_%s %s" % ('title', title))
        for key in ('category', 'vlabel', 'info', 'args', 'period', 'scale',
                    'total', 'order', 'printf', 'width', 'height'):
            val = graph_dict.get(key)
            if val is not None:
                if isinstance(val, bool):
                    if val:
                        val = "yes"
                    else:
                        val = "no"
                confs.append("graph_%s %s" % (key, val))
        
        # Order and Format Field Attributes
        for (field_name, field_attrs) in field_list:
            for key in ('label', 'type', 'draw', 'info', 'extinfo', 'colour',
                        'negative', 'graph', 'min', 'max', 'cdef', 'line',
                        'warning', 'critical'):
                val = field_attrs.get(key)
                if val is not None:
                    if isinstance(val, bool):
                        if val:
                            val = "yes"
                        else:
                            val = "no"
                    confs.append("%s.%s %s" % (field_name, key, val))
        return "\n".join(confs)
python
def _formatConfig(self, conf_dict):
        """Formats configuration dictionary from Munin Graph and returns
        multi-line value entries for the plugin config cycle.
        
        @param conf_dict: Configuration dictionary.
        @return:          Multi-line text.
        
        """
        confs = []
        graph_dict = conf_dict['graph']
        field_list = conf_dict['fields']
        
        # Order and format Graph Attributes
        title = graph_dict.get('title')
        if title is not None:
            if self.isMultiInstance and self._instanceLabel is not None:
                if self._instanceLabelType == 'suffix':
                    confs.append("graph_%s %s - %s"
                                 % ('title', title, self._instanceLabel,))
                elif self._instanceLabelType == 'prefix':
                    confs.append("graph_%s %s - %s"
                                 % ('title', self._instanceLabel, title,))
            else:
                confs.append("graph_%s %s" % ('title', title))
        for key in ('category', 'vlabel', 'info', 'args', 'period', 'scale',
                    'total', 'order', 'printf', 'width', 'height'):
            val = graph_dict.get(key)
            if val is not None:
                if isinstance(val, bool):
                    if val:
                        val = "yes"
                    else:
                        val = "no"
                confs.append("graph_%s %s" % (key, val))
        
        # Order and Format Field Attributes
        for (field_name, field_attrs) in field_list:
            for key in ('label', 'type', 'draw', 'info', 'extinfo', 'colour',
                        'negative', 'graph', 'min', 'max', 'cdef', 'line',
                        'warning', 'critical'):
                val = field_attrs.get(key)
                if val is not None:
                    if isinstance(val, bool):
                        if val:
                            val = "yes"
                        else:
                            val = "no"
                    confs.append("%s.%s %s" % (field_name, key, val))
        return "\n".join(confs)
[ "def", "_formatConfig", "(", "self", ",", "conf_dict", ")", ":", "confs", "=", "[", "]", "graph_dict", "=", "conf_dict", "[", "'graph'", "]", "field_list", "=", "conf_dict", "[", "'fields'", "]", "# Order and format Graph Attributes", "title", "=", "graph_dict", ".", "get", "(", "'title'", ")", "if", "title", "is", "not", "None", ":", "if", "self", ".", "isMultiInstance", "and", "self", ".", "_instanceLabel", "is", "not", "None", ":", "if", "self", ".", "_instanceLabelType", "==", "'suffix'", ":", "confs", ".", "append", "(", "\"graph_%s %s - %s\"", "%", "(", "'title'", ",", "title", ",", "self", ".", "_instanceLabel", ",", ")", ")", "elif", "self", ".", "_instanceLabelType", "==", "'prefix'", ":", "confs", ".", "append", "(", "\"graph_%s %s - %s\"", "%", "(", "'title'", ",", "self", ".", "_instanceLabel", ",", "title", ",", ")", ")", "else", ":", "confs", ".", "append", "(", "\"graph_%s %s\"", "%", "(", "'title'", ",", "title", ")", ")", "for", "key", "in", "(", "'category'", ",", "'vlabel'", ",", "'info'", ",", "'args'", ",", "'period'", ",", "'scale'", ",", "'total'", ",", "'order'", ",", "'printf'", ",", "'width'", ",", "'height'", ")", ":", "val", "=", "graph_dict", ".", "get", "(", "key", ")", "if", "val", "is", "not", "None", ":", "if", "isinstance", "(", "val", ",", "bool", ")", ":", "if", "val", ":", "val", "=", "\"yes\"", "else", ":", "val", "=", "\"no\"", "confs", ".", "append", "(", "\"graph_%s %s\"", "%", "(", "key", ",", "val", ")", ")", "# Order and Format Field Attributes", "for", "(", "field_name", ",", "field_attrs", ")", "in", "field_list", ":", "for", "key", "in", "(", "'label'", ",", "'type'", ",", "'draw'", ",", "'info'", ",", "'extinfo'", ",", "'colour'", ",", "'negative'", ",", "'graph'", ",", "'min'", ",", "'max'", ",", "'cdef'", ",", "'line'", ",", "'warning'", ",", "'critical'", ")", ":", "val", "=", "field_attrs", ".", "get", "(", "key", ")", "if", "val", "is", "not", "None", ":", "if", "isinstance", "(", "val", ",", "bool", ")", ":", "if", "val", ":", "val", "=", "\"yes\"", "else", ":", "val", "=", "\"no\"", "confs", ".", "append", "(", "\"%s.%s %s\"", "%", "(", "field_name", ",", "key", ",", "val", ")", ")", "return", "\"\\n\"", ".", "join", "(", "confs", ")" ]
Formats configuration dictionary from Munin Graph and returns
multi-line value entries for the plugin config cycle.

@param conf_dict: Configuration dictionary.
@return:          Multi-line text.
[ "Formats", "configuration", "directory", "from", "Munin", "Graph", "and", "returns", "multi", "-", "line", "value", "entries", "for", "the", "plugin", "config", "cycle", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L237-L287
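A sketch of the text produced for a small configuration dictionary on a single-instance plugin; note that booleans are rendered as yes/no:

conf_dict = {'graph': {'title': 'Disk Usage', 'vlabel': 'bytes', 'scale': True},
             'fields': [('sda', {'label': 'sda', 'type': 'GAUGE'})]}
# _formatConfig(conf_dict) would return:
#   graph_title Disk Usage
#   graph_vlabel bytes
#   graph_scale yes
#   sda.label sda
#   sda.type GAUGE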
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin._formatVals
def _formatVals(self, val_list):
        """Formats value list from Munin Graph and returns multi-line value
        entries for the plugin fetch cycle.
        
        @param val_list: List of name-value pairs.
        @return:         Multi-line text.
        
        """
        vals = []
        for (name, val) in val_list:
            if val is not None:
                if isinstance(val, float):
                    vals.append("%s.value %f" % (name, val))
                else:
                    vals.append("%s.value %s" % (name, val))
            else:
                vals.append("%s.value U" % (name,))
        return "\n".join(vals)
python
def _formatVals(self, val_list):
        """Formats value list from Munin Graph and returns multi-line value
        entries for the plugin fetch cycle.
        
        @param val_list: List of name-value pairs.
        @return:         Multi-line text.
        
        """
        vals = []
        for (name, val) in val_list:
            if val is not None:
                if isinstance(val, float):
                    vals.append("%s.value %f" % (name, val))
                else:
                    vals.append("%s.value %s" % (name, val))
            else:
                vals.append("%s.value U" % (name,))
        return "\n".join(vals)
[ "def", "_formatVals", "(", "self", ",", "val_list", ")", ":", "vals", "=", "[", "]", "for", "(", "name", ",", "val", ")", "in", "val_list", ":", "if", "val", "is", "not", "None", ":", "if", "isinstance", "(", "val", ",", "float", ")", ":", "vals", ".", "append", "(", "\"%s.value %f\"", "%", "(", "name", ",", "val", ")", ")", "else", ":", "vals", ".", "append", "(", "\"%s.value %s\"", "%", "(", "name", ",", "val", ")", ")", "else", ":", "vals", ".", "append", "(", "\"%s.value U\"", "%", "(", "name", ",", ")", ")", "return", "\"\\n\"", ".", "join", "(", "vals", ")" ]
Formats value list from Munin Graph and returns multi-line value
entries for the plugin fetch cycle.

@param val_list: List of name-value pairs.
@return:         Multi-line text.
[ "Formats", "value", "list", "from", "Munin", "Graph", "and", "returns", "multi", "-", "line", "value", "entries", "for", "the", "plugin", "fetch", "cycle", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L289-L306
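Traced output for the value formatter above; floats go through %f and missing values become the RRD unknown marker U:

# _formatVals([('sda', 1024), ('sdb', 2.5), ('sdc', None)]) returns:
#   sda.value 1024
#   sdb.value 2.500000
#   sdc.value U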
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin.envGet
def envGet(self, name, default=None, conv=None):
        """Return value for environment variable or None.
        
        @param name:    Name of environment variable.
        @param default: Default value if variable is undefined.
        @param conv:    Function for converting value to desired type.
        @return:        Value of environment variable.
        
        """
        if self._env.has_key(name):
            if conv is not None:
                return conv(self._env.get(name))
            else:
                return self._env.get(name)
        else:
            return default
python
def envGet(self, name, default=None, conv=None):
        """Return value for environment variable or None.
        
        @param name:    Name of environment variable.
        @param default: Default value if variable is undefined.
        @param conv:    Function for converting value to desired type.
        @return:        Value of environment variable.
        
        """
        if self._env.has_key(name):
            if conv is not None:
                return conv(self._env.get(name))
            else:
                return self._env.get(name)
        else:
            return default
[ "def", "envGet", "(", "self", ",", "name", ",", "default", "=", "None", ",", "conv", "=", "None", ")", ":", "if", "self", ".", "_env", ".", "has_key", "(", "name", ")", ":", "if", "conv", "is", "not", "None", ":", "return", "conv", "(", "self", ".", "_env", ".", "get", "(", "name", ")", ")", "else", ":", "return", "self", ".", "_env", ".", "get", "(", "name", ")", "else", ":", "return", "default" ]
Return value for environment variable or None.

@param name:    Name of environment variable.
@param default: Default value if variable is undefined.
@param conv:    Function for converting value to desired type.
@return:        Value of environment variable.
[ "Return", "value", "for", "environment", "variable", "or", "None", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L317-L332
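A sketch of envGet with a type conversion, assuming the plugin environment contains port=8080 but no host entry:

port = self.envGet('port', default=80, conv=int)  # -> 8080 as an int
host = self.envGet('host', default='127.0.0.1')   # -> '127.0.0.1' (fallback)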
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin.envGetList
def envGetList(self, name, attr_regex = '^\w+$', conv=None):
        """Parse the plugin environment variables to return list from variable
        with name list_<name>. The value of the variable must be a comma
        separated list of items.
        
        @param name:       Name of list.
                           (Also determines suffix for environment variable
                           name.)
        @param attr_regex: If the regex is defined, the items in the list are
                           ignored unless they comply with the format dictated
                           by the match regex.
        @param conv:       Function for converting value to desired type.
        @return:           List of items.
        
        """
        key = "list_%s" % name
        item_list = []
        if self._env.has_key(key):
            if attr_regex:
                recomp = re.compile(attr_regex)
            else:
                recomp = None
            for attr in self._env[key].split(','):
                attr = attr.strip()
                if recomp is None or recomp.search(attr):
                    if conv is not None:
                        item_list.append(conv(attr))
                    else:
                        item_list.append(attr)
        return item_list
python
def envGetList(self, name, attr_regex = '^\w+$', conv=None):
        """Parse the plugin environment variables to return list from variable
        with name list_<name>. The value of the variable must be a comma
        separated list of items.
        
        @param name:       Name of list.
                           (Also determines suffix for environment variable
                           name.)
        @param attr_regex: If the regex is defined, the items in the list are
                           ignored unless they comply with the format dictated
                           by the match regex.
        @param conv:       Function for converting value to desired type.
        @return:           List of items.
        
        """
        key = "list_%s" % name
        item_list = []
        if self._env.has_key(key):
            if attr_regex:
                recomp = re.compile(attr_regex)
            else:
                recomp = None
            for attr in self._env[key].split(','):
                attr = attr.strip()
                if recomp is None or recomp.search(attr):
                    if conv is not None:
                        item_list.append(conv(attr))
                    else:
                        item_list.append(attr)
        return item_list
[ "def", "envGetList", "(", "self", ",", "name", ",", "attr_regex", "=", "'^\\w+$'", ",", "conv", "=", "None", ")", ":", "key", "=", "\"list_%s\"", "%", "name", "item_list", "=", "[", "]", "if", "self", ".", "_env", ".", "has_key", "(", "key", ")", ":", "if", "attr_regex", ":", "recomp", "=", "re", ".", "compile", "(", "attr_regex", ")", "else", ":", "recomp", "=", "None", "for", "attr", "in", "self", ".", "_env", "[", "key", "]", ".", "split", "(", "','", ")", ":", "attr", "=", "attr", ".", "strip", "(", ")", "if", "recomp", "is", "None", "or", "recomp", ".", "search", "(", "attr", ")", ":", "if", "conv", "is", "not", "None", ":", "item_list", ".", "append", "(", "conv", "(", "attr", ")", ")", "else", ":", "item_list", ".", "append", "(", "attr", ")", "return", "item_list" ]
Parse the plugin environment variables to return list from variable
with name list_<name>. The value of the variable must be a comma
separated list of items.

@param name:       Name of list.
                   (Also determines suffix for environment variable name.)
@param attr_regex: If the regex is defined, the items in the list are
                   ignored unless they comply with the format dictated
                   by the match regex.
@param conv:       Function for converting value to desired type.
@return:           List of items.
[ "Parse", "the", "plugin", "environment", "variables", "to", "return", "list", "from", "variable", "with", "name", "list_<name", ">", ".", "The", "value", "of", "the", "variable", "must", "be", "a", "comma", "separated", "list", "of", "items", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L335-L364
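A sketch of envGetList; with an environment entry list_ports='80, 443, 8080', each item is stripped, checked against attr_regex and converted:

ports = self.envGetList('ports', conv=int)  # reads list_ports -> [80, 443, 8080]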
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin.envRegisterFilter
def envRegisterFilter(self, name, attr_regex = '^\w+$', default = True):
        """Register filter for including, excluding attributes in graphs through
        the use of include_<name> and exclude_<name> environment variables.
        The value of the variables must be a comma separated list of items.
        
        @param name:       Name of filter.
                           (Also determines suffix for environment variable
                           name.)
        @param attr_regex: Regular expression string for checking valid items.
        @param default:    Filter default. Applies when the include list is
                           not defined and the attribute is not in the
                           exclude list.
        
        """
        attrs = {}
        for prefix in ('include', 'exclude'):
            key = "%s_%s" % (prefix, name)
            val = self._env.get(key)
            if val:
                attrs[prefix] = [attr.strip() for attr in val.split(',')]
            else:
                attrs[prefix] = []
        self._filters[name] = MuninAttrFilter(attrs['include'],
                                              attrs['exclude'],
                                              attr_regex, default)
python
def envRegisterFilter(self, name, attr_regex = '^\w+$', default = True):
        """Register filter for including, excluding attributes in graphs through
        the use of include_<name> and exclude_<name> environment variables.
        The value of the variables must be a comma separated list of items.
        
        @param name:       Name of filter.
                           (Also determines suffix for environment variable
                           name.)
        @param attr_regex: Regular expression string for checking valid items.
        @param default:    Filter default. Applies when the include list is
                           not defined and the attribute is not in the
                           exclude list.
        
        """
        attrs = {}
        for prefix in ('include', 'exclude'):
            key = "%s_%s" % (prefix, name)
            val = self._env.get(key)
            if val:
                attrs[prefix] = [attr.strip() for attr in val.split(',')]
            else:
                attrs[prefix] = []
        self._filters[name] = MuninAttrFilter(attrs['include'],
                                              attrs['exclude'],
                                              attr_regex, default)
[ "def", "envRegisterFilter", "(", "self", ",", "name", ",", "attr_regex", "=", "'^\\w+$'", ",", "default", "=", "True", ")", ":", "attrs", "=", "{", "}", "for", "prefix", "in", "(", "'include'", ",", "'exclude'", ")", ":", "key", "=", "\"%s_%s\"", "%", "(", "prefix", ",", "name", ")", "val", "=", "self", ".", "_env", ".", "get", "(", "key", ")", "if", "val", ":", "attrs", "[", "prefix", "]", "=", "[", "attr", ".", "strip", "(", ")", "for", "attr", "in", "val", ".", "split", "(", "','", ")", "]", "else", ":", "attrs", "[", "prefix", "]", "=", "[", "]", "self", ".", "_filters", "[", "name", "]", "=", "MuninAttrFilter", "(", "attrs", "[", "'include'", "]", ",", "attrs", "[", "'exclude'", "]", ",", "attr_regex", ",", "default", ")" ]
Register filter for including, excluding attributes in graphs through
the use of include_<name> and exclude_<name> environment variables.
The value of the variables must be a comma separated list of items.

@param name:       Name of filter.
                   (Also determines suffix for environment variable name.)
@param attr_regex: Regular expression string for checking valid items.
@param default:    Filter default. Applies when the include list is
                   not defined and the attribute is not in the
                   exclude list.
[ "Register", "filter", "for", "including", "excluding", "attributes", "in", "graphs", "through", "the", "use", "of", "include_<name", ">", "and", "exclude_<name", ">", "environment", "variables", ".", "The", "value", "of", "the", "variables", "must", "be", "a", "comma", "separated", "list", "of", "items", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L366-L387
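A sketch combining registration with the check documented in the next record; the netifs filter name is made up, and the exact include/exclude precedence lives in MuninAttrFilter, which is not shown here, so the traced results are assumptions:

# Environment: include_netifs='eth0,eth1'  exclude_netifs='lo'
self.envRegisterFilter('netifs')
self.envCheckFilter('netifs', 'eth0')  # -> True, assuming listed attributes pass
self.envCheckFilter('netifs', 'lo')    # -> False, assuming excluded attributes fail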
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin.envCheckFilter
def envCheckFilter(self, name, attr):
        """Check if a specific graph attribute is enabled or disabled through
        the use of a filter based on include_<name> and exclude_<name>
        environment variables.
        
        @param name: Name of the Filter.
        @param attr: Name of the Attribute.
        @return:     Return True if the attribute is enabled.
        
        """
        flt = self._filters.get(name)
        if flt:
            return flt.check(attr)
        else:
            raise AttributeError("Undefined filter: %s" % name)
python
def envCheckFilter(self, name, attr):
        """Check if a specific graph attribute is enabled or disabled through
        the use of a filter based on include_<name> and exclude_<name>
        environment variables.
        
        @param name: Name of the Filter.
        @param attr: Name of the Attribute.
        @return:     Return True if the attribute is enabled.
        
        """
        flt = self._filters.get(name)
        if flt:
            return flt.check(attr)
        else:
            raise AttributeError("Undefined filter: %s" % name)
[ "def", "envCheckFilter", "(", "self", ",", "name", ",", "attr", ")", ":", "flt", "=", "self", ".", "_filters", ".", "get", "(", "name", ")", "if", "flt", ":", "return", "flt", ".", "check", "(", "attr", ")", "else", ":", "raise", "AttributeError", "(", "\"Undefined filter: %s\"", "%", "name", ")" ]
Check if a specific graph attribute is enabled or disabled through
the use of a filter based on include_<name> and exclude_<name>
environment variables.

@param name: Name of the Filter.
@param attr: Name of the Attribute.
@return:     Return True if the attribute is enabled.
[ "Check", "if", "a", "specific", "graph", "attribute", "is", "enabled", "or", "disabled", "through", "the", "use", "of", "a", "filter", "based", "on", "include_<name", ">", "and", "exclude_<name", ">", "environment", "variables", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L389-L403
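Checking a filter name that was never registered raises, per the code above:

self.envCheckFilter('bogus', 'eth0')  # raises AttributeError: Undefined filter: bogus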
aouyar/PyMunin
pymunin/__init__.py
MuninPlugin.envCheckFlag
def envCheckFlag(self, name, default = False):
        """Check graph flag for enabling / disabling attributes through the use
        of <name> environment variable.
        
        @param name:    Name of flag.
                        (Also determines the environment variable name.)
        @param default: Boolean (True or False). Default value for flag.
        @return:        Return True if the flag is enabled.
        
        """
        if self._flags.has_key(name):
            return self._flags[name]
        else:
            val = self._env.get(name)
            if val is None:
                return default
            elif val.lower() in ['yes', 'on']:
                self._flags[name] = True
                return True
            elif val.lower() in ['no', 'off']:
                self._flags[name] = False
                return False
            else:
                raise AttributeError("Value for flag %s, must be yes, no, on or off"
                                     % name)
python
def envCheckFlag(self, name, default = False):
        """Check graph flag for enabling / disabling attributes through the use
        of <name> environment variable.
        
        @param name:    Name of flag.
                        (Also determines the environment variable name.)
        @param default: Boolean (True or False). Default value for flag.
        @return:        Return True if the flag is enabled.
        
        """
        if self._flags.has_key(name):
            return self._flags[name]
        else:
            val = self._env.get(name)
            if val is None:
                return default
            elif val.lower() in ['yes', 'on']:
                self._flags[name] = True
                return True
            elif val.lower() in ['no', 'off']:
                self._flags[name] = False
                return False
            else:
                raise AttributeError("Value for flag %s, must be yes, no, on or off"
                                     % name)
[ "def", "envCheckFlag", "(", "self", ",", "name", ",", "default", "=", "False", ")", ":", "if", "self", ".", "_flags", ".", "has_key", "(", "name", ")", ":", "return", "self", ".", "_flags", "[", "name", "]", "else", ":", "val", "=", "self", ".", "_env", ".", "get", "(", "name", ")", "if", "val", "is", "None", ":", "return", "default", "elif", "val", ".", "lower", "(", ")", "in", "[", "'yes'", ",", "'on'", "]", ":", "self", ".", "_flags", "[", "name", "]", "=", "True", "return", "True", "elif", "val", ".", "lower", "(", ")", "in", "[", "'no'", ",", "'off'", "]", ":", "self", ".", "_flags", "[", "name", "]", "=", "False", "return", "False", "else", ":", "raise", "AttributeError", "(", "\"Value for flag %s, must be yes, no, on or off\"", "%", "name", ")" ]
Check graph flag for enabling / disabling attributes through the use
of <name> environment variable.

@param name:    Name of flag.
                (Also determines the environment variable name.)
@param default: Boolean (True or False). Default value for flag.
@return:        Return True if the flag is enabled.
[ "Check", "graph", "flag", "for", "enabling", "/", "disabling", "attributes", "through", "the", "use", "of", "<name", ">", "environment", "variable", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L405-L429
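Traced behavior of the flag parser above:

# Environment: ssl='on'
self.envCheckFlag('ssl')                # -> True (result cached in self._flags)
# Environment: ssl unset
self.envCheckFlag('ssl', default=True)  # -> True (default; defaults are not cached)
# Environment: ssl='maybe'
self.envCheckFlag('ssl')                # raises AttributeError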