| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q9900
|
PENMANCodec.handle_triple
|
train
|
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
|
python
|
{
"resource": ""
}
|
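For illustration, a minimal standalone sketch of the relation clean-up and de-inversion described in the docstring above; it does not use the penman package itself (the real codec also casts numeric strings and returns a Triple object).

def deinvert(lhs, relation, rhs):
    # mirror handle_triple: strip the leading colon, then swap the endpoints
    # of an inverted relation such as ':ARG0-of' back to its core form
    relation = relation.lstrip(':')
    if relation.endswith('-of'):
        return rhs, relation[:-len('-of')], lhs
    return lhs, relation, rhs

print(deinvert('c', ':ARG0-of', 'w'))   # ('w', 'ARG0', 'c')
print(deinvert('w', ':ARG1', 'c'))      # ('w', 'ARG1', 'c')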
q9901
|
PENMANCodec._encode_penman
|
train
|
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
|
python
|
{
"resource": ""
}
|
q9902
|
Graph.reentrancies
|
train
|
def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
|
python
|
{
"resource": ""
}
|
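A small self-contained illustration of the counting rule described above, using a toy edge list rather than the Graph class itself (the edges and variable names are made up for the example).

from collections import defaultdict

edges = [('a', 'ARG0', 'b'), ('c', 'ARG1', 'b'), ('a', 'ARG1', 'c')]
top = 'a'
entrancies = defaultdict(int)
entrancies[top] += 1                      # implicit entrancy to the top node
for _, _, target in edges:
    entrancies[target] += 1
# only 'b' is targeted twice, so it is reported with one entrant edge beyond the first
print({v: n - 1 for v, n in entrancies.items() if n >= 2})   # {'b': 1}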
q9903
|
check_1d
|
train
|
def check_1d(inp):
"""
Check input to be a vector. Converts lists to np.ndarray.
Parameters
----------
inp : obj
Input vector
Returns
-------
numpy.ndarray or None
Input vector or None
Examples
--------
>>> check_1d([0, 1, 2, 3])
[0, 1, 2, 3]
>>> check_1d('test')
None
"""
if isinstance(inp, list):
return check_1d(np.array(inp))
if isinstance(inp, np.ndarray):
if inp.ndim == 1: # input is a vector
return inp
|
python
|
{
"resource": ""
}
|
q9904
|
check_2d
|
train
|
def check_2d(inp):
"""
Check input to be a matrix. Converts lists of lists to np.ndarray.
Also allows the input to be a scipy sparse matrix.
Parameters
----------
inp : obj
Input matrix
Returns
-------
numpy.ndarray, scipy.sparse or None
Input matrix or None
Examples
--------
>>> check_2d([[0, 1], [2, 3]])
[[0, 1], [2, 3]]
>>> check_2d('test')
None
"""
if isinstance(inp, list):
return check_2d(np.array(inp))
if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
if inp.ndim == 2: # input is a dense matrix
return inp
if sps.issparse(inp):
if inp.ndim == 2: # input is a sparse matrix
return inp
|
python
|
{
"resource": ""
}
|
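A quick sanity check of the two dispatch helpers above; the netlsd.util import path is an assumption, and the assertions themselves only rely on numpy and scipy.

import numpy as np
import scipy.sparse as sps
from netlsd.util import check_1d, check_2d   # assumed import path for the helpers above

assert check_1d([0, 1, 2, 3]) is not None    # a plain list is converted to a 1-D array
assert check_1d(np.ones((3, 3))) is None     # matrices fail the vector check
assert check_2d(sps.eye(3)) is not None      # sparse matrices pass the 2-D check
assert check_2d('test') is None              # unsupported types fall through to None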
q9905
|
graph_to_laplacian
|
train
|
def graph_to_laplacian(G, normalized=True):
"""
Converts a graph from popular Python packages to Laplacian representation.
Currently support NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), normalized=False).todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None
"""
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
pass
try:
import graph_tool.all as gt
if isinstance(G, gt.Graph):
if normalized:
return gt.laplacian(G, normalized=True)
else:
return gt.laplacian(G)
except ImportError:
pass
try:
import igraph as ig
if isinstance(G, ig.Graph):
if normalized:
return np.array(G.laplacian(normalized=True))
else:
return np.array(G.laplacian())
except ImportError:
pass
|
python
|
{
"resource": ""
}
|
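The NetworkX branch above simply delegates to networkx, so its behaviour can be checked directly with the calls below (this sketch only needs networkx).

import networkx as nx

G = nx.complete_graph(3)
# unnormalized Laplacian of K3: degree 2 on the diagonal, -1 off-diagonal
print(nx.laplacian_matrix(G).todense())
# normalized Laplacian of K3: 1 on the diagonal, -1/2 off-diagonal
print(nx.normalized_laplacian_matrix(G).todense())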
q9906
|
netlsd
|
train
|
def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes NetLSD signature from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
kernel : str
Either 'heat' or 'wave'. Type of a kernel to use for computation.
eigenvalues : str, int or tuple
Number of eigenvalues to compute / use for the approximation.
If str, either 'full' or 'auto' is expected; otherwise an error is raised. 'auto' lets the program decide how many eigenvalues are needed for a faithful approximation, and 'full' computes all eigenvalues.
If int, that many eigenvalues are computed from each end of the spectrum and the rest are approximated with a linear growth model.
If tuple, two ints are expected: the eigenvalue counts for the lower and upper parts of the approximation.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, the unnormalized kernel trace is returned.
For details on how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, it is treated as a vector of exact normalization constants.
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
NetLSD signature
"""
if kernel not in {'heat', 'wave'}:
raise AttributeError('Unrecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel))
if not isinstance(normalized_laplacian, bool):
raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian))
if not isinstance(eigenvalues, (int, tuple, str)):
raise AttributeError('Unrecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues)))
if not isinstance(timescales, np.ndarray):
raise AttributeError('Unrecognized timescales data type: expected np.ndarray, got {0}'.format(type(timescales)))
if timescales.ndim != 1:
raise AttributeError('Unrecognized timescales dimensionality: expected a vector, got {0}-d array'.format(timescales.ndim))
if normalization not in {'complete', 'empty', 'none', True, False, None}:
if not isinstance(normalization, np.ndarray):
raise AttributeError('Unrecognized normalization type: expected one of [\'complete\', \'empty\', None or np.ndarray], got {0}'.format(normalization))
if normalization.ndim != 1:
raise AttributeError('Unrecognized normalization dimensionality: expected a vector, got {0}-d array'.format(normalization.ndim))
if timescales.shape[0] != normalization.shape[0]:
raise AttributeError('Unrecognized normalization dimensionality: expected {0}-length vector, got length {1}'.format(timescales.shape[0], normalization.shape[0]))
eivals = check_1d(inp)
if eivals is None:
mat = check_2d(inp)
if mat is None:
mat = graph_to_laplacian(inp, normalized_laplacian)
if mat is None:
raise ValueError('Unrecognized input type: expected one of [\'np.ndarray\', \'scipy.sparse\', \'networkx.Graph\', \'graph_tool.Graph\', or \'igraph.Graph\'], got {0}'.format(type(inp)))
else:
mat = mat_to_laplacian(inp, normalized_laplacian)
eivals = eigenvalues_auto(mat, eigenvalues)
if kernel == 'heat':
return _hkt(eivals, timescales, normalization, normalized_laplacian)
else:
return _wkt(eivals, timescales, normalization, normalized_laplacian)
|
python
|
{
"resource": ""
}
|
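A hedged usage sketch: assuming the function above is the one exposed by the pip-installable netlsd package (together with the heat wrapper below), two graphs can be compared by the distance between their signatures.

import numpy as np
import networkx as nx
import netlsd   # assumed to expose netlsd(...) / heat(...) as defined in this file

g1 = nx.erdos_renyi_graph(200, 0.05, seed=1)
g2 = nx.erdos_renyi_graph(200, 0.05, seed=2)
sig1 = netlsd.heat(g1)                  # heat-trace signature on the default 250 timescales
sig2 = netlsd.heat(g2)
print(sig1.shape)                       # (250,)
print(np.linalg.norm(sig1 - sig2))      # compare graphs by comparing signatures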
q9907
|
heat
|
train
|
def heat(inp, timescales=np.logspace(-2, 2, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes heat kernel trace from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
eigenvalues : str, int or tuple
Number of eigenvalues to compute / use for the approximation.
If str, either 'full' or 'auto' is expected; otherwise an error is raised. 'auto' lets the program decide how many eigenvalues are needed for a faithful approximation, and 'full' computes all eigenvalues.
If int, that many eigenvalues are computed from each end of the spectrum and the rest are approximated with a linear growth model.
If tuple, two ints are expected: the eigenvalue counts for the lower and upper parts of the approximation.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, the unnormalized heat kernel trace is returned.
For details on how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, it is treated as a vector of exact normalization constants.
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Heat kernel trace signature
"""
return netlsd(inp, timescales, 'heat', eigenvalues, normalization, normalized_laplacian)
|
python
|
{
"resource": ""
}
|
q9908
|
wave
|
train
|
def wave(inp, timescales=np.linspace(0, 2*np.pi, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes wave kernel trace from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
eigenvalues : str, int or tuple
Number of eigenvalues to compute / use for the approximation.
If str, either 'full' or 'auto' is expected; otherwise an error is raised. 'auto' lets the program decide how many eigenvalues are needed for a faithful approximation, and 'full' computes all eigenvalues.
If int, that many eigenvalues are computed from each end of the spectrum and the rest are approximated with a linear growth model.
If tuple, two ints are expected: the eigenvalue counts for the lower and upper parts of the approximation.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, the unnormalized wave kernel trace is returned.
For details on how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, it is treated as a vector of exact normalization constants.
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
"""
return netlsd(inp, timescales, 'wave', eigenvalues, normalization, normalized_laplacian)
|
python
|
{
"resource": ""
}
|
q9909
|
_hkt
|
train
|
def _hkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes heat kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, the unnormalized heat kernel trace is returned.
For details on how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, it is treated as a vector of exact normalization constants.
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Heat kernel trace signature
"""
nv = eivals.shape[0]
hkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
hkt[idx] = np.sum(np.exp(-t * eivals))
if isinstance(normalization, np.ndarray):
return hkt / normalization
if normalization == 'empty' or normalization == True:
return hkt / nv
if normalization == 'complete':
if normalized_laplacian:
return hkt / (1 + (nv - 1) * np.exp(-timescales))
else:
return hkt / (1 + nv * np.exp(-nv * timescales))
return hkt
|
python
|
{
"resource": ""
}
|
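The loop above evaluates h(t) = sum_j exp(-t * lambda_j) one timescale at a time; an equivalent vectorized sketch (an alternative formulation, not the package's own code) does it with a single outer product.

import numpy as np

def heat_trace(eivals, timescales):
    # rows index timescales, columns index eigenvalues; summing each row gives h(t)
    return np.exp(-np.outer(timescales, eivals)).sum(axis=1)

eivals = np.array([0.0, 0.5, 1.0, 2.0])
timescales = np.logspace(-2, 2, 5)
print(heat_trace(eivals, timescales))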
q9910
|
_wkt
|
train
|
def _wkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, the unnormalized wave kernel trace is returned.
For details on how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, it is treated as a vector of exact normalization constants.
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
"""
nv = eivals.shape[0]
wkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
wkt[idx] = np.sum(np.exp(-1j * t * eivals))
if isinstance(normalization, np.ndarray):
return wkt / normalization
if normalization == 'empty' or normalization == True:
return wkt / nv
if normalization == 'complete':
if normalized_laplacian:
return wkt / (1 + (nv - 1) * np.cos(timescales))
else:
return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
return wkt
|
python
|
{
"resource": ""
}
|
q9911
|
SortedListWithKey.clear
|
train
|
def clear(self):
"""Remove all the elements from the list."""
self._len = 0
del self._maxes[:]
del self._lists[:]
del self._keys[:]
del self._index[:]
|
python
|
{
"resource": ""
}
|
q9912
|
SortedListWithKey.islice
|
train
|
def islice(self, start=None, stop=None, reverse=False):
"""
Returns an iterator that slices `self` from `start` to `stop` index,
inclusive and exclusive respectively.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end.
"""
_len = self._len
if not _len:
return iter(())
start, stop, step = self._slice(slice(start, stop))
if start >= stop:
return iter(())
_pos = self._pos
min_pos, min_idx = _pos(start)
if stop == _len:
max_pos = len(self._lists) - 1
max_idx = len(self._lists[-1])
else:
max_pos, max_idx = _pos(stop)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
|
python
|
{
"resource": ""
}
|
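A usage sketch for the method above, assuming the SortedListWithKey class from the sortedcontainers package (where islice takes start, stop and reverse exactly as documented).

from sortedcontainers import SortedListWithKey

sl = SortedListWithKey([5, 1, 4, 2, 3], key=lambda x: -x)   # kept sorted as [5, 4, 3, 2, 1]
print(list(sl.islice(1, 4)))             # [4, 3, 2]   (indices 1..3 of the sorted order)
print(list(sl.islice(reverse=True)))     # [1, 2, 3, 4, 5]   (whole list, reversed)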
q9913
|
SortedListWithKey.copy
|
train
|
def copy(self):
"""Return a shallow copy of the sorted list."""
return self.__class__(self, key=self._key, load=self._load)
|
python
|
{
"resource": ""
}
|
q9914
|
not26
|
train
|
def not26(func):
"""Function decorator for methods not implemented in Python 2.6."""
@wraps(func)
def errfunc(*args, **kwargs):
raise NotImplementedError
if hexversion < 0x02070000:
return errfunc
else:
return func
|
python
|
{
"resource": ""
}
|
q9915
|
SortedDict.copy
|
train
|
def copy(self):
"""Return a shallow copy of the sorted dictionary."""
return self.__class__(self._key, self._load, self._iteritems())
|
python
|
{
"resource": ""
}
|
q9916
|
SummaryTracker.create_summary
|
train
|
def create_summary(self):
"""Return a summary.
See also the notes on ignore_self in the class as well as the
initializer documentation.
"""
if not self.ignore_self:
res = summary.summarize(muppy.get_objects())
else:
# If the user requested the data required to store summaries to be
# ignored in the summaries, we need to identify all objects which
# are related to each summary stored.
# Thus we build a list of all objects used for summary storage as
# well as a dictionary which tells us how often an object is
# referenced by the summaries.
# During this identification process, more objects are referenced,
# namely int objects identifying referenced objects as well as the
# corresponding count.
# For all these objects it will be checked whether they are
# referenced from outside the monitor's scope. If not, they will be
# subtracted from the snapshot summary, otherwise they are
# included (as this indicates that they are relevant to the
# application).
all_of_them = [] # every single object
ref_counter = {} # how often it is referenced; (id(o), o) pairs
def store_info(o):
all_of_them.append(o)
if id(o) in ref_counter:
ref_counter[id(o)] += 1
else:
ref_counter[id(o)] = 1
# store infos on every single object related to the summaries
store_info(self.summaries)
for k, v in self.summaries.items():
store_info(k)
summary._traverse(v, store_info)
# do the summary
res = summary.summarize(muppy.get_objects())
# remove ids stored in the ref_counter
for _id in ref_counter:
# referenced in frame, ref_counter, ref_counter.keys()
if len(gc.get_referrers(_id)) == (3):
summary._subtract(res, _id)
for o in all_of_them:
# referenced in frame, summary, all_of_them
if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
summary._subtract(res, o)
return res
|
python
|
{
"resource": ""
}
|
q9917
|
SummaryTracker.diff
|
train
|
def diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
res = None
if summary2 is None:
self.s1 = self.create_summary()
if summary1 is None:
res = summary.get_diff(self.s0, self.s1)
else:
res = summary.get_diff(summary1, self.s1)
self.s0 = self.s1
else:
if summary1 is not None:
res = summary.get_diff(summary1, summary2)
else:
raise ValueError("You cannot provide summary2 without summary1.")
return summary._sweep(res)
|
python
|
{
"resource": ""
}
|
q9918
|
SummaryTracker.print_diff
|
train
|
def print_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
summary.print_(self.diff(summary1=summary1, summary2=summary2))
|
python
|
{
"resource": ""
}
|
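A minimal usage sketch for the tracker methods above, assuming they are the ones shipped in Pympler's tracker module; print_diff() summarizes the objects created since the previous snapshot.

from pympler import tracker   # assumed package providing SummaryTracker

tr = tracker.SummaryTracker()
leaky = [bytearray(64) for _ in range(1000)]   # allocate something between snapshots
tr.print_diff()                                # per-type counts and sizes of new objects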
q9919
|
ObjectTracker._get_objects
|
train
|
def _get_objects(self, ignore=[]):
"""Get all currently existing objects.
XXX - ToDo: This method is a copy&paste from muppy.get_objects, but
some modifications are applied. Specifically, it allows ignoring
objects (which includes the current frame).
keyword arguments
ignore -- list of objects to ignore
"""
def remove_ignore(objects, ignore=[]):
# remove all objects listed in the ignore list
res = []
for o in objects:
if not compat.object_in_list(o, ignore):
res.append(o)
return res
tmp = gc.get_objects()
ignore.append(inspect.currentframe()) #PYCHOK change ignore
ignore.append(self) #PYCHOK change ignore
if hasattr(self, 'o0'): ignore.append(self.o0) #PYCHOK change ignore
if hasattr(self, 'o1'): ignore.append(self.o1) #PYCHOK change ignore
ignore.append(ignore) #PYCHOK change ignore
ignore.append(remove_ignore) #PYCHOK change ignore
# this implies that referenced objects are also ignored
tmp = remove_ignore(tmp, ignore)
res = []
for o in tmp:
# gc.get_objects returns only container objects, but we also want
# the objects referenced by them
refs = muppy.get_referents(o)
for ref in refs:
if not muppy._is_containerobject(ref):
# we already got the container objects, now we only add
# non-container objects
res.append(ref)
res.extend(tmp)
res = muppy._remove_duplicates(res)
if ignore is not None:
# repeat to filter out objects which may have been referenced
res = remove_ignore(res, ignore)
# manual cleanup, see comment above
del ignore[:]
return res
|
python
|
{
"resource": ""
}
|
q9920
|
ObjectTracker.get_diff
|
train
|
def get_diff(self, ignore=[]):
"""Get the diff to the last time the state of objects was measured.
keyword arguments
ignore -- list of objects to ignore
"""
# ignore this and the caller frame
ignore.append(inspect.currentframe()) #PYCHOK change ignore
self.o1 = self._get_objects(ignore)
diff = muppy.get_diff(self.o0, self.o1)
self.o0 = self.o1
# manual cleanup, see comment above
del ignore[:] #PYCHOK change ignore
return diff
|
python
|
{
"resource": ""
}
|
q9921
|
ObjectTracker.print_diff
|
train
|
def print_diff(self, ignore=[]):
"""Print the diff to the last time the state of objects was measured.
keyword arguments
ignore -- list of objects to ignore
"""
# ignore this and the caller frame
ignore.append(inspect.currentframe()) #PYCHOK change ignore
diff = self.get_diff(ignore)
print("Added objects:")
summary.print_(summary.summarize(diff['+']))
print("Removed objects:")
summary.print_(summary.summarize(diff['-']))
# manual cleanup, see comment above
del ignore[:]
|
python
|
{
"resource": ""
}
|
q9922
|
jaccard
|
train
|
def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - len(set1 & set2) / float(len(set1 | set2))
|
python
|
{
"resource": ""
}
|
q9923
|
sorensen
|
train
|
def sorensen(seq1, seq2):
"""Compute the Sorensen distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
set1, set2 = set(seq1), set(seq2)
return 1 - (2 * len(set1 & set2) / float(len(set1) + len(set2)))
|
python
|
{
"resource": ""
}
|
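A worked example for the two set distances above (plain Python, no extra packages): the strings share the characters {n, h, t}.

seq1, seq2 = "night", "nacht"
set1, set2 = set(seq1), set(seq2)
inter, union = len(set1 & set2), len(set1 | set2)            # 3 and 7
print(1 - inter / float(union))                              # Jaccard:  1 - 3/7  ~ 0.571
print(1 - 2 * inter / float(len(set1) + len(set2)))          # Sorensen: 1 - 6/10 = 0.400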
q9924
|
_long2bytes
|
train
|
def _long2bytes(n, blocksize=0):
"""Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front
of the byte string with binary zeros so that the length is a multiple
of blocksize.
"""
# After much testing, this algorithm was deemed to be the fastest.
s = ''
pack = struct.pack
while n > 0:
### CHANGED FROM '>I' TO '<I'. (DCG)
s = pack('<I', n & 0xffffffffL) + s
### --------------------------
n = n >> 32
# Strip off leading zeros.
for i in range(len(s)):
if s[i] <> '\000':
break
else:
# Only happens when n == 0.
s = '\000'
i = 0
s = s[i:]
# Add back some pad bytes. This could be done more efficiently
# w.r.t. the de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * '\000' + s
return s
|
python
|
{
"resource": ""
}
|
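The helper above is Python 2 code (long literals, the <> operator); the sketch below is an illustrative Python 3 port with the same word-wise little-endian packing, not the module's own API.

import struct

def long2bytes(n, blocksize=0):
    words = []
    while n > 0:
        words.append(struct.pack('<I', n & 0xffffffff))   # little-endian 32-bit words
        n >>= 32
    s = b''.join(reversed(words))                          # most significant word first
    s = s.lstrip(b'\x00') or b'\x00'                       # strip leading zeros (keep one for n == 0)
    if blocksize > 0 and len(s) % blocksize:
        s = b'\x00' * (blocksize - len(s) % blocksize) + s
    return s

print(long2bytes(0x0102030405).hex())   # '0100000005040302' (note the per-word byte order)
print(long2bytes(0).hex())              # '00'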
q9925
|
MD5.init
|
train
|
def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0L
self.input = []
# Load magic initialization constants.
self.A = 0x67452301L
self.B = 0xefcdab89L
self.C = 0x98badcfeL
self.D = 0x10325476L
|
python
|
{
"resource": ""
}
|
q9926
|
MeliaeAdapter.value
|
train
|
def value( self, node, parent=None ):
"""Return value used to compare size of this node"""
# this is the *weighted* size/contribution of the node
try:
return node['contribution']
except KeyError, err:
contribution = int(node.get('totsize',0)/float( len(node.get('parents',())) or 1))
node['contribution'] = contribution
return contribution
|
python
|
{
"resource": ""
}
|
q9927
|
MeliaeAdapter.label
|
train
|
def label( self, node ):
"""Return textual description of this node"""
result = []
if node.get('type'):
result.append( node['type'] )
if node.get('name' ):
result.append( node['name'] )
elif node.get('value') is not None:
result.append( unicode(node['value'])[:32])
if 'module' in node and not node['module'] in result:
result.append( ' in %s'%( node['module'] ))
if node.get( 'size' ):
result.append( '%s'%( mb( node['size'] )))
if node.get( 'totsize' ):
result.append( '(%s)'%( mb( node['totsize'] )))
parent_count = len( node.get('parents',()))
if parent_count > 1:
result.append( '/%s refs'%( parent_count ))
return " ".join(result)
|
python
|
{
"resource": ""
}
|
q9928
|
MeliaeAdapter.best_parent
|
train
|
def best_parent( self, node, tree_type=None ):
"""Choose the best parent for a given node"""
parents = self.parents(node)
selected_parent = None
if node['type'] == 'type':
module = ".".join( node['name'].split( '.' )[:-1] )
if module:
for mod in parents:
if mod['type'] == 'module' and mod['name'] == module:
selected_parent = mod
if parents and selected_parent is None:
parents.sort( key = lambda x: self.value(node, x) )
return parents[-1]
return selected_parent
|
python
|
{
"resource": ""
}
|
q9929
|
Stats.load_stats
|
train
|
def load_stats(self, fdump):
"""
Load the data from a dump file.
The argument `fdump` can be either a filename or an open file object
that requires read access.
"""
if isinstance(fdump, type('')):
fdump = open(fdump, 'rb')
self.index = pickle.load(fdump)
self.snapshots = pickle.load(fdump)
self.sorted = []
|
python
|
{
"resource": ""
}
|
q9930
|
Stats.annotate_snapshot
|
train
|
def annotate_snapshot(self, snapshot):
"""
Store additional statistical data in snapshot.
"""
if hasattr(snapshot, 'classes'):
return
snapshot.classes = {}
for classname in list(self.index.keys()):
total = 0
active = 0
merged = Asized(0, 0)
for tobj in self.index[classname]:
_merge_objects(snapshot.timestamp, merged, tobj)
total += tobj.get_size_at_time(snapshot.timestamp)
if tobj.birth < snapshot.timestamp and \
(tobj.death is None or tobj.death > snapshot.timestamp):
active += 1
try:
pct = total * 100.0 / snapshot.total
except ZeroDivisionError: # pragma: no cover
pct = 0
try:
avg = total / active
except ZeroDivisionError:
avg = 0
snapshot.classes[classname] = dict(sum=total,
avg=avg,
pct=pct,
active=active)
snapshot.classes[classname]['merged'] = merged
|
python
|
{
"resource": ""
}
|
q9931
|
ConsoleStats.print_object
|
train
|
def print_object(self, tobj):
"""
Print the gathered information of object `tobj` in human-readable format.
"""
if tobj.death:
self.stream.write('%-32s ( free ) %-35s\n' % (
trunc(tobj.name, 32, left=1), trunc(tobj.repr, 35)))
else:
self.stream.write('%-32s 0x%08x %-35s\n' % (
trunc(tobj.name, 32, left=1),
tobj.id,
trunc(tobj.repr, 35)
))
if tobj.trace:
self.stream.write(_format_trace(tobj.trace))
for (timestamp, size) in tobj.snapshots:
self.stream.write(' %-30s %s\n' % (
pp_timestamp(timestamp), pp(size.size)
))
self._print_refs(size.refs, size.size)
if tobj.death is not None:
self.stream.write(' %-30s finalize\n' % (
pp_timestamp(tobj.death),
))
|
python
|
{
"resource": ""
}
|
q9932
|
ConsoleStats.print_stats
|
train
|
def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects whose classname contains the substring
supplied by the `clsname` argument are printed. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, that many tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if not self.sorted:
self.sort_stats()
_sorted = self.sorted
if clsname:
_sorted = [to for to in _sorted if clsname in to.classname]
if limit < 1.0:
limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]
# Emit per-instance data
for tobj in _sorted:
self.print_object(tobj)
|
python
|
{
"resource": ""
}
|
q9933
|
ConsoleStats.print_summary
|
train
|
def print_summary(self):
"""
Print per-class summary for each snapshot.
"""
# Emit class summaries for each snapshot
classlist = self.tracked_classes
fobj = self.stream
fobj.write('---- SUMMARY '+'-'*66+'\n')
for snapshot in self.snapshots:
self.annotate_snapshot(snapshot)
fobj.write('%-35s %11s %12s %12s %5s\n' % (
trunc(snapshot.desc, 35),
'active',
pp(snapshot.asizeof_total),
'average',
'pct'
))
for classname in classlist:
info = snapshot.classes.get(classname)
fobj.write(' %-33s %11d %12s %12s %4d%%\n' % (
trunc(classname, 33),
info['active'],
pp(info['sum']),
pp(info['avg']),
info['pct']
))
fobj.write('-'*79+'\n')
|
python
|
{
"resource": ""
}
|
q9934
|
HtmlStats.print_class_details
|
train
|
def print_class_details(self, fname, classname):
"""
Print detailed statistics and instances for the class `classname`. All
data will be written to the file `fname`.
"""
fobj = open(fname, "w")
fobj.write(self.header % (classname, self.style))
fobj.write("<h1>%s</h1>\n" % (classname))
sizes = [tobj.get_max_size() for tobj in self.index[classname]]
total = 0
for s in sizes:
total += s
data = {'cnt': len(self.index[classname]), 'cls': classname}
data['avg'] = pp(total / len(sizes))
data['max'] = pp(max(sizes))
data['min'] = pp(min(sizes))
fobj.write(self.class_summary % data)
fobj.write(self.charts[classname])
fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
for snapshot in self.snapshots:
if classname in snapshot.classes:
merged = snapshot.classes[classname]['merged']
fobj.write(self.class_snapshot % {
'name': snapshot.desc, 'cls':classname, 'total': pp(merged.size)
})
if merged.refs:
self._print_refs(fobj, merged.refs, merged.size)
else:
fobj.write('<p>No per-referent sizes recorded.</p>\n')
fobj.write("<h2>Instances</h2>\n")
for tobj in self.index[classname]:
fobj.write('<table id="tl" width="100%" rules="rows">\n')
fobj.write('<tr><td id="hl" width="140px">Instance</td><td id="hl">%s at 0x%08x</td></tr>\n' % (tobj.name, tobj.id))
if tobj.repr:
fobj.write("<tr><td>Representation</td><td>%s </td></tr>\n" % tobj.repr)
fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" % (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
if tobj.trace:
trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" % trace)
for (timestamp, size) in tobj.snapshots:
fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
if not size.refs:
fobj.write("<td>%s</td></tr>\n" % pp(size.size))
else:
fobj.write("<td>%s" % pp(size.size))
self._print_refs(fobj, size.refs, size.size)
fobj.write("</td></tr>\n")
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close()
|
python
|
{
"resource": ""
}
|
q9935
|
HtmlStats.relative_path
|
train
|
def relative_path(self, filepath, basepath=None):
"""
Convert the filepath path to a relative path against basepath. By
default basepath is self.basedir.
"""
if basepath is None:
basepath = self.basedir
if not basepath:
return filepath
if filepath.startswith(basepath):
rel = filepath[len(basepath):]
if rel and rel[0] == os.sep:
rel = rel[1:]
return rel
|
python
|
{
"resource": ""
}
|
q9936
|
HtmlStats.create_title_page
|
train
|
def create_title_page(self, filename, title=''):
"""
Output the title page.
"""
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))
fobj.write("<h1>%s</h1>\n" % title)
fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])
fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')
classlist = list(self.index.keys())
classlist.sort()
for snapshot in self.snapshots:
fobj.write('<tr><td>\n')
fobj.write('<table id="tl" rules="rows">\n')
fobj.write("<h3>%s snapshot at %s</h3>\n" % (
snapshot.desc or 'Untitled',
pp_timestamp(snapshot.timestamp)
))
data = {}
data['sys'] = pp(snapshot.system_total.vsz)
data['tracked'] = pp(snapshot.tracked_total)
data['asizeof'] = pp(snapshot.asizeof_total)
data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
fobj.write(self.snapshot_summary % data)
if snapshot.tracked_total:
fobj.write(self.snapshot_cls_header)
for classname in classlist:
data = snapshot.classes[classname].copy()
data['cls'] = '<a href="%s">%s</a>' % (self.relative_path(self.links[classname]), classname)
data['sum'] = pp(data['sum'])
data['avg'] = pp(data['avg'])
fobj.write(self.snapshot_cls % data)
fobj.write('</table>')
fobj.write('</td><td>\n')
if snapshot.tracked_total:
fobj.write(self.charts[snapshot])
fobj.write('</td></tr>\n')
fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close()
|
python
|
{
"resource": ""
}
|
q9937
|
HtmlStats.create_lifetime_chart
|
train
|
def create_lifetime_chart(self, classname, filename=''):
"""
Create chart that depicts the lifetime of the instance registered with
`classname`. The output is written to `filename`.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, savefig
except ImportError:
return HtmlStats.nopylab_msg % (classname+" lifetime")
cnt = []
for tobj in self.index[classname]:
cnt.append([tobj.birth, 1])
if tobj.death:
cnt.append([tobj.death, -1])
cnt.sort()
for i in range(1, len(cnt)):
cnt[i][1] += cnt[i-1][1]
#if cnt[i][0] == cnt[i-1][0]:
# del cnt[i-1]
x = [t for [t,c] in cnt]
y = [c for [t,c] in cnt]
figure()
xlabel("Execution time [s]")
ylabel("Instance #")
title("%s instances" % classname)
plot(x, y, 'o')
savefig(filename)
return self.chart_tag % (os.path.basename(filename))
|
python
|
{
"resource": ""
}
|
q9938
|
HtmlStats.create_snapshot_chart
|
train
|
def create_snapshot_chart(self, filename=''):
"""
Create chart that depicts the memory allocation over time apportioned to
the tracked classes.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
import matplotlib.mlab as mlab
except ImportError:
return self.nopylab_msg % ("memory allocation")
classlist = self.tracked_classes
times = [snapshot.timestamp for snapshot in self.snapshots]
base = [0] * len(self.snapshots)
poly_labels = []
polys = []
for cn in classlist:
pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
if max(pct) > 3.0:
sz = [float(fp.classes[cn]['sum'])/(1024*1024) for fp in self.snapshots]
sz = [sx+sy for sx, sy in zip(base, sz)]
xp, yp = mlab.poly_between(times, base, sz)
polys.append( ((xp, yp), {'label': cn}) )
poly_labels.append(cn)
base = sz
figure()
title("Snapshot Memory")
xlabel("Execution Time [s]")
ylabel("Virtual Memory [MiB]")
sizes = [float(fp.asizeof_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'r--', label='Total')
sizes = [float(fp.tracked_total)/(1024*1024) for fp in self.snapshots]
plot(times, sizes, 'b--', label='Tracked total')
for (args, kwds) in polys:
fill(*args, **kwds)
legend(loc=2)
savefig(filename)
return self.chart_tag % (self.relative_path(filename))
|
python
|
{
"resource": ""
}
|
q9939
|
HtmlStats.create_pie_chart
|
train
|
def create_pie_chart(self, snapshot, filename=''):
"""
Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`.
"""
try:
from pylab import figure, title, pie, axes, savefig
from pylab import sum as pylab_sum
except ImportError:
return self.nopylab_msg % ("pie_chart")
# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
return ''
classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
if v['pct'] > 3.0:
classlist.append(k)
sizelist.append(v['sum'])
sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
classlist.insert(0, 'Other')
#sizelist = [x*0.01 for x in sizelist]
title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
figure(figsize=(8,8))
axes([0.1, 0.1, 0.8, 0.8])
pie(sizelist, labels=classlist)
savefig(filename, dpi=50)
return self.chart_tag % (self.relative_path(filename))
|
python
|
{
"resource": ""
}
|
q9940
|
HtmlStats.create_html
|
train
|
def create_html(self, fname, title="ClassTracker Statistics"):
"""
Create HTML page `fname` and additional files in a directory derived
from `fname`.
"""
# Create a folder to store the charts and additional HTML files.
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] + '_files'
if not os.path.isdir(self.filesdir):
os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}
# Annotate all snapshots in advance
self.annotate()
# Create charts. The tags to show the images are returned and stored in
# the self.charts dictionary. This allows to return alternative text if
# the chart creation framework is not available.
self.charts = {}
fn = os.path.join(self.filesdir, 'timespace.png')
self.charts['snapshots'] = self.create_snapshot_chart(fn)
for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
fn = os.path.join(self.filesdir, 'fp%d.png' % (idx))
self.charts[fp] = self.create_pie_chart(fp, fn)
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'-lt.png')
self.charts[cn] = self.create_lifetime_chart(cn, fn)
# Create HTML pages first for each class and then the index page.
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace('.', '_')+'.html')
self.links[cn] = fn
self.print_class_details(fn, cn)
self.create_title_page(fname, title=title)
|
python
|
{
"resource": ""
}
|
q9941
|
Path.write_bytes
|
train
|
def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
if not isinstance(data, six.binary_type):
raise TypeError(
'data must be %s, not %s' %
(six.binary_type.__name__, data.__class__.__name__))
with self.open(mode='wb') as f:
return f.write(data)
|
python
|
{
"resource": ""
}
|
q9942
|
Path.write_text
|
train
|
def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, six.text_type):
raise TypeError(
'data must be %s, not %s' %
(six.text_type.__name__, data.__class__.__name__))
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data)
|
python
|
{
"resource": ""
}
|
q9943
|
sort_group
|
train
|
def sort_group(d, return_only_first=False):
''' Sort a dictionary of relative paths and cluster equal paths together at the same time '''
# First, sort the paths in order (each path must be a couple (parent_dir, filename), so that there is no ambiguity; otherwise a file at the root would sort after a folder/file, since the ordering is alphabetical with no notion of tree structure).
d_sort = sort_dict_of_paths(d)
# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
base_elt = d_sort.pop(0)
# No element, then we just return
if base_elt[1] is None:
return None
# Else, we will now group equivalent files together (remember we are working on multiple directories, so we can have multiple equivalent relative filepaths, but of course the absolute filepaths are different).
else:
# Init by creating the first group and pushing the first ordered filepath into the first group
lst = []
lst.append([base_elt])
if d_sort:
# For each subsequent filepath
for elt in d_sort:
# If the filepath is not empty (generator died)
if elt[1] is not None:
# If the filepath is the same to the latest grouped filepath, we add it to the same group
if elt[1] == base_elt[1]:
lst[-1].append(elt)
# Else the filepath is different: we create a new group, add the filepath to this group, and replace the latest grouped filepath
else:
if return_only_first: break # break here if we only need the first group
lst.append([elt])
base_elt = elt # replace the latest grouped filepath
return lst
|
python
|
{
"resource": ""
}
|
q9944
|
RefBrowser.get_tree
|
train
|
def get_tree(self):
"""Get a tree of referrers of the root object."""
self.ignore.append(inspect.currentframe())
return self._get_tree(self.root, self.maxdepth)
|
python
|
{
"resource": ""
}
|
q9945
|
RefBrowser._get_tree
|
train
|
def _get_tree(self, root, maxdepth):
"""Workhorse of the get_tree implementation.
This is a recursive method, which is why there is a wrapper method.
root is the current root object of the tree which should be returned.
Note that root is not of the type _Node.
maxdepth defines how far down from the root the tree
should be built.
"""
self.ignore.append(inspect.currentframe())
res = _Node(root, self.str_func) #PYCHOK use root parameter
self.already_included.add(id(root)) #PYCHOK use root parameter
if maxdepth == 0:
return res
objects = gc.get_referrers(root) #PYCHOK use root parameter
self.ignore.append(objects)
for o in objects:
# XXX: find a better way to ignore dict of _Node objects
if isinstance(o, dict):
sampleNode = _Node(1)
if list(sampleNode.__dict__.keys()) == list(o.keys()):
continue
_id = id(o)
if not self.repeat and (_id in self.already_included):
s = self.str_func(o)
res.children.append("%s (already included, id %s)" %\
(s, _id))
continue
if (not isinstance(o, _Node)) and (o not in self.ignore):
res.children.append(self._get_tree(o, maxdepth-1))
return res
|
python
|
{
"resource": ""
}
|
q9946
|
StreamBrowser.print_tree
|
train
|
def print_tree(self, tree=None):
""" Print referrers tree to console.
keyword arguments
tree -- if not None, the passed tree will be printed. Otherwise it is
based on the rootobject.
"""
if tree is None:
self._print(self.root, '', '')
else:
self._print(tree, '', '')
|
python
|
{
"resource": ""
}
|
q9947
|
StreamBrowser._print
|
train
|
def _print(self, tree, prefix, carryon):
"""Compute and print a new line of the tree.
This is a recursive function.
arguments
tree -- tree to print
prefix -- prefix to the current line to print
carryon -- prefix which is used to carry on the vertical lines
"""
level = prefix.count(self.cross) + prefix.count(self.vline)
len_children = 0
if isinstance(tree , _Node):
len_children = len(tree.children)
# add vertex
prefix += str(tree)
# and as many spaces as the vertex is long
carryon += self.space * len(str(tree))
if (level == self.maxdepth) or (not isinstance(tree, _Node)) or\
(len_children == 0):
self.stream.write(prefix+'\n')
return
else:
# add in between connections
prefix += self.hline
carryon += self.space
# if there is more than one branch, add a cross
if len(tree.children) > 1:
prefix += self.cross
carryon += self.vline
prefix += self.hline
carryon += self.space
if len_children > 0:
# print the first branch (on the same line)
self._print(tree.children[0], prefix, carryon)
for b in range(1, len_children):
# the carryon becomes the prefix for all following children
prefix = carryon[:-2] + self.cross + self.hline
# remove the vlines for any children of last branch
if b == (len_children-1):
carryon = carryon[:-2] + 2*self.space
self._print(tree.children[b], prefix, carryon)
# leave a free line before the next branch
if b == (len_children-1):
if len(carryon.strip(' ')) == 0:
return
self.stream.write(carryon[:-2].rstrip()+'\n')
|
python
|
{
"resource": ""
}
|
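A usage sketch for the browser classes above, assuming they are the ones shipped in Pympler's refbrowser module (ConsoleBrowser is the StreamBrowser subclass that writes to stdout).

from pympler import refbrowser   # assumed home of these browser classes

root = [1, 2, 3]
holder = {'the_list': root}      # an extra referrer so the printed tree has a branch
browser = refbrowser.ConsoleBrowser(root, maxdepth=2, str_func=lambda o: str(type(o)))
browser.print_tree()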
q9948
|
InteractiveBrowser.main
|
train
|
def main(self, standalone=False):
"""Create interactive browser window.
keyword arguments
standalone -- Set to true, if the browser is not attached to other
windows
"""
window = _Tkinter.Tk()
sc = _TreeWidget.ScrolledCanvas(window, bg="white",\
highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = _ReferrerTreeItem(window, self.get_tree(), self)
node = _TreeNode(sc.canvas, None, item)
node.expand()
if standalone:
window.mainloop()
|
python
|
{
"resource": ""
}
|
q9949
|
format_meter
|
train
|
def format_meter(n, total, elapsed, ncols=None, prefix='',
unit=None, unit_scale=False, ascii=False):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If None, only basic progress
statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progress meter [default: None]. The fallback meter
width is 10.
prefix : str, optional
Prefix message (included in total width).
unit : str, optional
String that will be used to define the unit of each iteration.
[default: "it"]
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.). [default: False]
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# in case the total is wrong (n is above the total), then
# we switch to the mode without showing the total prediction
# (since ETA would be wrong anyway)
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
if elapsed:
if unit_scale:
rate = format_sizeof(n / elapsed, suffix='')
else:
rate = '{0:5.2f}'.format(n / elapsed)
else:
rate = '?'
rate_unit = unit if unit else 'it'
if not unit:
unit = ''
n_fmt = str(n)
total_fmt = str(total)
if unit_scale:
n_fmt = format_sizeof(n, suffix='')
if total:
total_fmt = format_sizeof(total, suffix='')
if total:
frac = n / total
percentage = frac * 100
remaining_str = format_interval(elapsed * (total-n) / n) if n else '?'
l_bar = '{1}{0:.0f}%|'.format(percentage, prefix) if prefix else \
'{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1}{2} [{3}<{4}, {5} {6}/s]'.format(
n_fmt, total_fmt, unit, elapsed_str, remaining_str,
rate, rate_unit)
if ncols == 0:
bar = ''
else:
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#'*bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588)*bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0) # spacing
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0) # spacing
return l_bar + full_bar + r_bar
else: # no progressbar nor ETA, just progress statistics
return '{0}{1} [{2}, {3} {4}/s]'.format(
n_fmt, unit, elapsed_str, rate, rate_unit)
|
python
|
{
"resource": ""
}
|
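A quick usage sketch: in current tqdm releases a comparable formatter is exposed as a static method on the tqdm class, so the call below assumes that API rather than the module-level function shown above.

from tqdm import tqdm

# renders something like "demo:  30%|###       | 30/100 [00:01<00:03, 20.00it/s]"
print(tqdm.format_meter(n=30, total=100, elapsed=1.5, ncols=60, prefix='demo: '))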
q9950
|
getIcon
|
train
|
def getIcon( data ):
"""Return the data from the resource as a wxIcon"""
import cStringIO
stream = cStringIO.StringIO(data)
image = wx.ImageFromStream(stream)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(wx.BitmapFromImage(image))
return icon
|
python
|
{
"resource": ""
}
|
q9951
|
main
|
train
|
def main():
"""Mainloop for the application"""
logging.basicConfig(level=logging.INFO)
app = RunSnakeRunApp(0)
app.MainLoop()
|
python
|
{
"resource": ""
}
|
q9952
|
MainFrame.CreateMenuBar
|
train
|
def CreateMenuBar(self):
"""Create our menu-bar for triggering operations"""
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
# self.packageMenuItem = menu.AppendCheckItem(
# ID_PACKAGE_VIEW, _('&File View'),
# _('View time spent by package/module')
# )
self.percentageMenuItem = menu.AppendCheckItem(
ID_PERCENTAGE_VIEW, _('&Percentage View'),
_('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
ID_ROOT_VIEW, _('&Root View (Home)'),
_('View the root of the tree')
)
self.backViewItem = menu.Append(
ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
ID_UP_VIEW, _('&Up'),
_('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
ID_MORE_SQUARE, _('&Hierarchic Squares'),
_('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
# it's more about how to generate graphics to describe profiling...
self.deeperViewItem = menu.Append(
ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views')
)
self.shallowerViewItem = menu.Append(
ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views')
)
# wx.ToolTip.Enable(True)
menubar.Append(menu, _('&View'))
self.viewTypeMenu =wx.Menu( )
menubar.Append(self.viewTypeMenu, _('View &Type'))
self.SetMenuBar(menubar)
wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True))
wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile)
wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory)
wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView)
wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView)
wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView)
wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView)
wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView)
wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView)
wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle)
|
python
|
{
"resource": ""
}
|
q9953
|
MainFrame.CreateSourceWindow
|
train
|
def CreateSourceWindow(self, tabs):
"""Create our source-view window for tabs"""
if editor and self.sourceCodeControl is None:
self.sourceCodeControl = wx.py.editwindow.EditWindow(
self.tabs, -1
)
self.sourceCodeControl.SetText(u"")
self.sourceFileShown = None
self.sourceCodeControl.setDisplayLineNumbers(True)
|
python
|
{
"resource": ""
}
|
q9954
|
MainFrame.SetupToolBar
|
train
|
def SetupToolBar(self):
"""Create the toolbar for common actions"""
tb = self.CreateToolBar(self.TBFLAGS)
tsize = (24, 24)
tb.ToolBitmapSize = tsize
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,
tsize)
tb.AddLabelTool(ID_OPEN, "Open", open_bmp, shortHelp="Open",
longHelp="Open a (c)Profile trace file")
if not osx:
tb.AddSeparator()
# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)
self.rootViewTool = tb.AddLabelTool(
ID_ROOT_VIEW, _("Root View"),
wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),
shortHelp=_("Display the root of the current view tree (home view)")
)
self.rootViewTool = tb.AddLabelTool(
ID_BACK_VIEW, _("Back"),
wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),
shortHelp=_("Back to the previously activated node in the call tree")
)
self.upViewTool = tb.AddLabelTool(
ID_UP_VIEW, _("Up"),
wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),
shortHelp=_("Go one level up the call tree (highest-percentage parent)")
)
if not osx:
tb.AddSeparator()
# TODO: figure out why the control is sizing the label incorrectly on Linux
self.percentageViewTool = wx.CheckBox(tb, -1, _("Percent "))
self.percentageViewTool.SetToolTip(wx.ToolTip(
_("Toggle display of percentages in list views")))
tb.AddControl(self.percentageViewTool)
wx.EVT_CHECKBOX(self.percentageViewTool,
self.percentageViewTool.GetId(), self.OnPercentageView)
self.viewTypeTool= wx.Choice( tb, -1, choices= getattr(self.loader,'ROOTS',[]) )
self.viewTypeTool.SetToolTip(wx.ToolTip(
_("Switch between different hierarchic views of the data")))
wx.EVT_CHOICE( self.viewTypeTool, self.viewTypeTool.GetId(), self.OnViewTypeTool )
tb.AddControl( self.viewTypeTool )
tb.Realize()
|
python
|
{
"resource": ""
}
|
q9955
|
MainFrame.OnViewTypeTool
|
train
|
def OnViewTypeTool( self, event ):
"""When the user changes the selection, make that our selection"""
new = self.viewTypeTool.GetStringSelection()
if new != self.viewType:
self.viewType = new
self.OnRootView( event )
|
python
|
{
"resource": ""
}
|
q9956
|
MainFrame.SetPercentageView
|
train
|
def SetPercentageView(self, percentageView):
"""Set whether to display percentage or absolute values"""
self.percentageView = percentageView
self.percentageMenuItem.Check(self.percentageView)
self.percentageViewTool.SetValue(self.percentageView)
total = self.adapter.value( self.loader.get_root( self.viewType ) )
for control in self.ProfileListControls:
control.SetPercentage(self.percentageView, total)
self.adapter.SetPercentage(self.percentageView, total)
|
python
|
{
"resource": ""
}
|
q9957
|
MainFrame.OnUpView
|
train
|
def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
if parents:
if not selected_parent:
parents.sort(key = lambda a: self.adapter.value(node, a))
selected_parent = parents[-1]
class event:
node = selected_parent
self.OnNodeActivated(event)
else:
self.SetStatusText(_('No parents for the currently selected node: %(node_name)s')
% dict(node_name=self.adapter.label(node)))
else:
self.SetStatusText(_('No currently selected node'))
|
python
|
{
"resource": ""
}
|
q9958
|
MainFrame.OnBackView
|
train
|
def OnBackView(self, event):
"""Request to move backward in the history"""
self.historyIndex -= 1
try:
self.RestoreHistory(self.history[self.historyIndex])
except IndexError, err:
self.SetStatusText(_('No further history available'))
|
python
|
{
"resource": ""
}
|
q9959
|
MainFrame.OnRootView
|
train
|
def OnRootView(self, event):
"""Reset view to the root of the tree"""
self.adapter, tree, rows = self.RootNode()
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
self.ConfigureViewTypeChoices()
|
python
|
{
"resource": ""
}
|
q9960
|
MainFrame.OnNodeActivated
|
train
|
def OnNodeActivated(self, event):
"""Double-click or enter on a node in some control..."""
self.activated_node = self.selected_node = event.node
self.squareMap.SetModel(event.node, self.adapter)
self.squareMap.SetSelected( event.node )
if editor:
if self.SourceShowFile(event.node):
if hasattr(event.node,'lineno'):
self.sourceCodeControl.GotoLine(event.node.lineno)
self.RecordHistory()
|
python
|
{
"resource": ""
}
|
q9961
|
MainFrame.RecordHistory
|
train
|
def RecordHistory(self):
"""Add the given node to the history-set"""
if not self.restoringHistory:
record = self.activated_node
if self.historyIndex < -1:
try:
del self.history[self.historyIndex+1:]
except AttributeError, err:
pass
if (not self.history) or record != self.history[-1]:
self.history.append(record)
del self.history[:-200]
self.historyIndex = -1
|
python
|
{
"resource": ""
}
|
q9962
|
MainFrame.RootNode
|
train
|
def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows
|
python
|
{
"resource": ""
}
|
q9963
|
MainFrame.SaveState
|
train
|
def SaveState( self, config_parser ):
"""Retrieve window state to be restored on the next run..."""
if not config_parser.has_section( 'window' ):
config_parser.add_section( 'window' )
if self.IsMaximized():
config_parser.set( 'window', 'maximized', str(True))
else:
config_parser.set( 'window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set( 'window', 'width', str(size[0]) )
config_parser.set( 'window', 'height', str(size[1]) )
config_parser.set( 'window', 'x', str(position[0]) )
config_parser.set( 'window', 'y', str(position[1]) )
for control in self.ProfileListControls:
control.SaveState( config_parser )
return config_parser
|
python
|
{
"resource": ""
}
|
q9964
|
MainFrame.LoadState
|
train
|
def LoadState( self, config_parser ):
"""Set our window state from the given config_parser instance"""
if not config_parser:
return
if (
not config_parser.has_section( 'window' ) or (
config_parser.has_option( 'window','maximized' ) and
config_parser.getboolean( 'window', 'maximized' )
)
):
self.Maximize(True)
try:
width,height,x,y = [
config_parser.getint( 'window',key )
for key in ['width','height','x','y']
]
self.SetPosition( (x,y))
self.SetSize( (width,height))
except ConfigParser.NoSectionError, err:
# the file isn't written yet, so don't even warn...
pass
except Exception, err:
# this is just convenience, if it breaks in *any* way, ignore it...
log.error(
"Unable to load window preferences, ignoring: %s", traceback.format_exc()
)
try:
font_size = config_parser.getint('window', 'font_size')
except Exception:
pass # use the default, by default
else:
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(font_size)
for ctrl in self.ProfileListControls:
ctrl.SetFont(font)
for control in self.ProfileListControls:
control.LoadState( config_parser )
self.config = config_parser
wx.EVT_CLOSE( self, self.OnCloseWindow )
|
python
|
{
"resource": ""
}
|
q9965
|
is_file
|
train
|
def is_file(dirname):
'''Checks if a path is an actual file that exists'''
if not os.path.isfile(dirname):
msg = "{0} is not an existing file".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
|
python
|
{
"resource": ""
}
|
q9966
|
is_dir
|
train
|
def is_dir(dirname):
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
|
python
|
{
"resource": ""
}
|
q9967
|
is_dir_or_file
|
train
|
def is_dir_or_file(dirname):
'''Checks if a path is an actual directory that exists or a file'''
if not os.path.isdir(dirname) and not os.path.isfile(dirname):
msg = "{0} is not a directory nor a file".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
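# --- Illustrative usage sketch, not part of the original source. ---
# The three validators above are meant to be passed as argparse ``type=``
# callables so invalid paths are rejected at argument-parsing time. The
# option names below are hypothetical and exist only for demonstration.
import argparse

_demo_parser = argparse.ArgumentParser(description='path validation demo')
_demo_parser.add_argument('-i', '--input', type=is_file, help='an existing input file')
_demo_parser.add_argument('-d', '--workdir', type=is_dir, help='an existing directory')
_demo_parser.add_argument('-t', '--target', type=is_dir_or_file, help='an existing file or directory')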
|
python
|
{
"resource": ""
}
|
q9968
|
fullpath
|
train
|
def fullpath(relpath):
'''Relative path to absolute'''
if (type(relpath) is object or type(relpath) is file):
relpath = relpath.name
return os.path.abspath(os.path.expanduser(relpath))
|
python
|
{
"resource": ""
}
|
q9969
|
remove_if_exist
|
train
|
def remove_if_exist(path): # pragma: no cover
"""Delete a file or a directory recursively if it exists, else no exception is raised"""
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
return True
elif os.path.isfile(path):
os.remove(path)
return True
return False
|
python
|
{
"resource": ""
}
|
q9970
|
copy_any
|
train
|
def copy_any(src, dst, only_missing=False): # pragma: no cover
"""Copy a file or a directory tree, deleting the destination before processing"""
if not only_missing:
remove_if_exist(dst)
if os.path.exists(src):
if os.path.isdir(src):
if not only_missing:
shutil.copytree(src, dst, symlinks=False, ignore=None)
else:
for dirpath, filepath in recwalk(src):
srcfile = os.path.join(dirpath, filepath)
relpath = os.path.relpath(srcfile, src)
dstfile = os.path.join(dst, relpath)
if not os.path.exists(dstfile):
create_dir_if_not_exist(os.path.dirname(dstfile))
shutil.copyfile(srcfile, dstfile)
shutil.copystat(srcfile, dstfile)
return True
elif os.path.isfile(src) and (not only_missing or not os.path.exists(dst)):
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return True
return False
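# --- Illustrative usage sketch, not part of the original source. ---
# Hypothetical call: mirror a directory tree into a backup location, copying
# only the files that are missing on the destination side (the paths are
# placeholders, not real locations).
copy_any('/path/to/source', '/path/to/backup', only_missing=True)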
|
python
|
{
"resource": ""
}
|
q9971
|
group_files_by_size
|
train
|
def group_files_by_size(fileslist, multi): # pragma: no cover
    ''' Cluster files into the specified number of groups, where each group's total size is as close as possible to the others'.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C)
'''
flord = OrderedDict(sorted(fileslist.items(), key=lambda x: x[1], reverse=True))
if multi <= 1:
fgrouped = {}
i = 0
for x in flord.keys():
i += 1
fgrouped[i] = [[x]]
return fgrouped
fgrouped = {}
i = 0
while flord:
i += 1
fgrouped[i] = []
big_key, big_value = flord.popitem(0)
fgrouped[i].append([big_key])
for j in xrange(multi-1):
cluster = []
if not flord: break
child_key, child_value = flord.popitem(0)
cluster.append(child_key)
if child_value == big_value:
fgrouped[i].append(cluster)
continue
else:
diff = big_value - child_value
for key, value in flord.iteritems():
if value <= diff:
cluster.append(key)
del flord[key]
if value == diff:
break
else:
child_value += value
diff = big_value - child_value
fgrouped[i].append(cluster)
return fgrouped
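# --- Illustrative usage sketch, not part of the original source (assumes the
# module-level imports used above, e.g. OrderedDict, are available). ---
# ``fileslist`` maps file names to sizes and ``multi`` is the number of groups
# per cluster; the names and sizes below are made up.
_demo_files = {'a': 100, 'b': 60, 'c': 40, 'd': 50}
_demo_clusters = group_files_by_size(_demo_files, multi=2)
# With these sizes one expected outcome is {1: [['a'], ['b', 'c']], 2: [['d']]}:
# 'b' (60) and 'c' (40) together roughly balance 'a' (100) in the first cluster.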
|
python
|
{
"resource": ""
}
|
q9972
|
group_files_by_size_fast
|
train
|
def group_files_by_size_fast(fileslist, nbgroups, mode=1): # pragma: no cover
    '''Given a dict of files with their sizes, output a list where the files are grouped into nbgroups per cluster.
    Pseudo-code for an algorithm in O(n log(g)) (thanks to insertion sort or binary search trees)
    See for more info: http://cs.stackexchange.com/questions/44406/fast-algorithm-for-clustering-groups-of-elements-given-their-size-time/44614#44614
For each file:
- If to-fill list is empty or file.size > first-key(to-fill):
* Create cluster c with file in first group g1
* Add to-fill[file.size].append([c, g2], [c, g3], ..., [c, gn])
- Else:
* ksize = first-key(to-fill)
* c, g = to-fill[ksize].popitem(0)
* Add file to cluster c in group g
* nsize = ksize - file.size
* if nsize > 0:
. to-fill[nsize].append([c, g])
. sort to-fill if not an automatic ordering structure
'''
ftofill = SortedList()
ftofill_pointer = {}
fgrouped = [] # [] or {}
ford = sorted(fileslist.iteritems(), key=lambda x: x[1])
last_cid = -1
while ford:
fname, fsize = ford.pop()
#print "----\n"+fname, fsize
#if ftofill: print "beforebranch", fsize, ftofill[-1]
#print ftofill
if not ftofill or fsize > ftofill[-1]:
last_cid += 1
#print "Branch A: create cluster %i" % last_cid
fgrouped.append([])
#fgrouped[last_cid] = []
fgrouped[last_cid].append([fname])
if mode==0:
for g in xrange(nbgroups-1, 0, -1):
fgrouped[last_cid].append([])
if not fsize in ftofill_pointer:
ftofill_pointer[fsize] = []
ftofill_pointer[fsize].append((last_cid, g))
ftofill.add(fsize)
else:
for g in xrange(1, nbgroups):
try:
fgname, fgsize = ford.pop()
#print "Added to group %i: %s %i" % (g, fgname, fgsize)
except IndexError:
break
fgrouped[last_cid].append([fgname])
diff_size = fsize - fgsize
if diff_size > 0:
if not diff_size in ftofill_pointer:
ftofill_pointer[diff_size] = []
ftofill_pointer[diff_size].append((last_cid, g))
ftofill.add(diff_size)
else:
#print "Branch B"
ksize = ftofill.pop()
c, g = ftofill_pointer[ksize].pop()
#print "Assign to cluster %i group %i" % (c, g)
fgrouped[c][g].append(fname)
nsize = ksize - fsize
if nsize > 0:
if not nsize in ftofill_pointer:
ftofill_pointer[nsize] = []
ftofill_pointer[nsize].append((c, g))
ftofill.add(nsize)
return fgrouped
|
python
|
{
"resource": ""
}
|
q9973
|
grouped_count_sizes
|
train
|
def grouped_count_sizes(fileslist, fgrouped): # pragma: no cover
'''Compute the total size per group and total number of files. Useful to check that everything is OK.'''
fsizes = {}
total_files = 0
allitems = None
if isinstance(fgrouped, dict):
allitems = fgrouped.iteritems()
elif isinstance(fgrouped, list):
allitems = enumerate(fgrouped)
for fkey, cluster in allitems:
fsizes[fkey] = []
for subcluster in cluster:
tot = 0
if subcluster is not None:
for fname in subcluster:
tot += fileslist[fname]
total_files += 1
fsizes[fkey].append(tot)
return fsizes, total_files
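# --- Illustrative usage sketch, not part of the original source (Python 2, and
# assuming ``SortedList`` from the sortedcontainers package is importable). ---
# End-to-end check for the two grouping helpers above: cluster the files, then
# verify the per-group byte totals and the overall file count.
_demo_files = {'a': 100, 'b': 60, 'c': 50, 'd': 40}
_demo_grouped = group_files_by_size_fast(_demo_files, nbgroups=2)
_demo_sizes, _demo_count = grouped_count_sizes(_demo_files, _demo_grouped)
# Expected shape: _demo_grouped == [[['a'], ['b']], [['c'], ['d']]],
# _demo_sizes == {0: [100, 60], 1: [50, 40]} and _demo_count == 4.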
|
python
|
{
"resource": ""
}
|
q9974
|
ConfigPanel.GetOptions
|
train
|
def GetOptions(self):
"""
returns the collective values from all of the
widgets contained in the panel"""
values = [c.GetValue()
for c in chain(*self.widgets)
if c.GetValue() is not None]
return ' '.join(values)
|
python
|
{
"resource": ""
}
|
q9975
|
Positional.GetValue
|
train
|
def GetValue(self):
'''
Positionals have no associated options_string,
so only the supplied arguments are returned.
The order is assumed to be the same as the order
of declaration in the client code
Returns
"argument_value"
'''
self.AssertInitialization('Positional')
if str(self._widget.GetValue()) == EMPTY:
return None
return self._widget.GetValue()
|
python
|
{
"resource": ""
}
|
q9976
|
Flag.Update
|
train
|
def Update(self, size):
'''
Custom wrapper calculator to account for the
increased size of the _msg widget after being
inlined with the wx.CheckBox
'''
if self._msg is None:
return
help_msg = self._msg
width, height = size
content_area = int((width / 3) * .70)
wiggle_room = range(int(content_area - content_area * .05), int(content_area + content_area * .05))
if help_msg.Size[0] not in wiggle_room:
self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' '))
self._msg.Wrap(content_area)
|
python
|
{
"resource": ""
}
|
q9977
|
get_path
|
train
|
def get_path(language):
''' Returns the full path to the language file '''
filename = language.lower() + '.json'
lang_file_path = os.path.join(_DEFAULT_DIR, filename)
if not os.path.exists(lang_file_path):
raise IOError('Could not find {} language file'.format(language))
return lang_file_path
|
python
|
{
"resource": ""
}
|
q9978
|
trunc
|
train
|
def trunc(obj, max, left=0):
"""
    Convert `obj` to string, replace newlines with ``|`` and truncate the string
    to `max` characters. If the string is longer, ``...`` is appended (or, with
    `left=True`, prepended and the string is truncated at the beginning).
    @note: Does not catch exceptions when converting `obj` to string with `str`.
    >>> trunc('This is a long text.', 8)
    'This ...'
    >>> trunc('This is a long text.', 8, left=True)
    '...text.'
"""
s = str(obj)
s = s.replace('\n', '|')
if len(s) > max:
if left:
return '...'+s[len(s)-max+3:]
else:
return s[:(max-3)]+'...'
else:
return s
|
python
|
{
"resource": ""
}
|
q9979
|
pp
|
train
|
def pp(i, base=1024):
"""
Pretty-print the integer `i` as a human-readable size representation.
"""
degree = 0
pattern = "%4d %s"
while i > base:
pattern = "%7.2f %s"
i = i / float(base)
degree += 1
    scales = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return pattern % (i, scales[degree])
|
python
|
{
"resource": ""
}
|
q9980
|
pp_timestamp
|
train
|
def pp_timestamp(t):
"""
Get a friendly timestamp represented as a string.
"""
if t is None:
return ''
h, m, s = int(t / 3600), int(t / 60 % 60), t % 60
return "%02d:%02d:%05.2f" % (h, m, s)
|
python
|
{
"resource": ""
}
|
q9981
|
GarbageGraph.print_stats
|
train
|
def print_stats(self, stream=None):
"""
Log annotated garbage objects to console or file.
:param stream: open file, uses sys.stdout if not given
"""
if not stream: # pragma: no cover
stream = sys.stdout
self.metadata.sort(key=lambda x: -x.size)
stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
for g in self.metadata:
stream.write('0x%08x %8d %-12s %-46s\n' % (g.id, g.size, trunc(g.type, 12),
trunc(g.str, 46)))
stream.write('Garbage: %8d collected objects (%s in cycles): %12s\n' % \
(self.count, self.num_in_cycles, pp(self.total_size)))
|
python
|
{
"resource": ""
}
|
q9982
|
Profile.disable
|
train
|
def disable(self, threads=True):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
|
python
|
{
"resource": ""
}
|
q9983
|
Tee.flush
|
train
|
def flush(self):
""" Force commit changes to the file and stdout """
if not self.nostdout:
self.stdout.flush()
if self.file is not None:
self.file.flush()
|
python
|
{
"resource": ""
}
|
q9984
|
PStatsAdapter.parents
|
train
|
def parents(self, node):
"""Determine all parents of node in our tree"""
return [
parent for parent in
getattr( node, 'parents', [] )
if getattr(parent, 'tree', self.TREE) == self.TREE
]
|
python
|
{
"resource": ""
}
|
q9985
|
PStatsAdapter.filename
|
train
|
def filename( self, node ):
"""Extension to squaremap api to provide "what file is this" information"""
if not node.directory:
# TODO: any cases other than built-ins?
return None
if node.filename == '~':
# TODO: look up C/Cython/whatever source???
return None
return os.path.join(node.directory, node.filename)
|
python
|
{
"resource": ""
}
|
q9986
|
get_obj
|
train
|
def get_obj(ref):
"""Get object from string reference."""
oid = int(ref)
return server.id2ref.get(oid) or server.id2obj[oid]
|
python
|
{
"resource": ""
}
|
q9987
|
process
|
train
|
def process():
"""Get process overview."""
pmi = ProcessMemoryInfo()
threads = get_current_threads()
return dict(info=pmi, threads=threads)
|
python
|
{
"resource": ""
}
|
q9988
|
tracker_index
|
train
|
def tracker_index():
"""Get tracker overview."""
stats = server.stats
if stats and stats.snapshots:
stats.annotate()
timeseries = []
for cls in stats.tracked_classes:
series = []
for snapshot in stats.snapshots:
series.append(snapshot.classes.get(cls, {}).get('sum', 0))
timeseries.append((cls, series))
series = [s.overhead for s in stats.snapshots]
timeseries.append(("Profiling overhead", series))
if stats.snapshots[0].system_total.data_segment:
# Assume tracked data resides in the data segment
series = [s.system_total.data_segment - s.tracked_total - s.overhead
for s in stats.snapshots]
timeseries.append(("Data segment", series))
series = [s.system_total.code_segment for s in stats.snapshots]
timeseries.append(("Code segment", series))
series = [s.system_total.stack_segment for s in stats.snapshots]
timeseries.append(("Stack segment", series))
series = [s.system_total.shared_segment for s in stats.snapshots]
timeseries.append(("Shared memory", series))
else:
series = [s.total - s.tracked_total - s.overhead
for s in stats.snapshots]
timeseries.append(("Other", series))
return dict(snapshots=stats.snapshots, timeseries=timeseries)
else:
return dict(snapshots=[])
|
python
|
{
"resource": ""
}
|
q9989
|
tracker_class
|
train
|
def tracker_class(clsname):
"""Get class instance details."""
stats = server.stats
if not stats:
bottle.redirect('/tracker')
stats.annotate()
return dict(stats=stats, clsname=clsname)
|
python
|
{
"resource": ""
}
|
q9990
|
garbage_cycle
|
train
|
def garbage_cycle(index):
"""Get reference cycle details."""
graph = _compute_garbage_graphs()[int(index)]
graph.reduce_to_cycles()
objects = graph.metadata
objects.sort(key=lambda x: -x.size)
return dict(objects=objects, index=index)
|
python
|
{
"resource": ""
}
|
q9991
|
_get_graph
|
train
|
def _get_graph(graph, filename):
"""Retrieve or render a graph."""
try:
rendered = graph.rendered_file
except AttributeError:
try:
graph.render(os.path.join(server.tmpdir, filename), format='png')
rendered = filename
except OSError:
rendered = None
graph.rendered_file = rendered
return rendered
|
python
|
{
"resource": ""
}
|
q9992
|
garbage_graph
|
train
|
def garbage_graph(index):
"""Get graph representation of reference cycle."""
graph = _compute_garbage_graphs()[int(index)]
reduce_graph = bottle.request.GET.get('reduce', '')
if reduce_graph:
graph = graph.reduce_to_cycles()
if not graph:
return None
filename = 'garbage%so%s.png' % (index, reduce_graph)
rendered_file = _get_graph(graph, filename)
if rendered_file:
bottle.send_file(rendered_file, root=server.tmpdir)
else:
return None
|
python
|
{
"resource": ""
}
|
q9993
|
_winreg_getShellFolder
|
train
|
def _winreg_getShellFolder( name ):
"""Get a shell folder by string name from the registry"""
k = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
try:
# should check that it's valid? How?
return _winreg.QueryValueEx( k, name )[0]
finally:
_winreg.CloseKey( k )
|
python
|
{
"resource": ""
}
|
q9994
|
appdatadirectory
|
train
|
def appdatadirectory( ):
"""Attempt to retrieve the current user's app-data directory
This is the location where application-specific
files should be stored. On *nix systems, this will
be the ${HOME}/.config directory. On Win32 systems, it will be
the "Application Data" directory. Note that for
Win32 systems it is normal to create a sub-directory
for storing data in the Application Data directory.
"""
if shell:
# on Win32 and have Win32all extensions, best-case
return shell_getShellFolder(shellcon.CSIDL_APPDATA)
if _winreg:
# on Win32, but no Win32 shell com available, this uses
# a direct registry access, likely to fail on Win98/Me
return _winreg_getShellFolder( 'AppData' )
# okay, what if for some reason _winreg is missing? would we want to allow ctypes?
## default case, look for name in environ...
for name in ['APPDATA', 'HOME']:
if name in os.environ:
return os.path.join( os.environ[name], '.config' )
# well, someone's being naughty, see if we can get ~ to expand to a directory...
possible = os.path.abspath(os.path.expanduser( '~/.config' ))
if os.path.exists( possible ):
return possible
raise OSError( """Unable to determine user's application-data directory, no ${HOME} or ${APPDATA} in environment""" )
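# --- Illustrative usage sketch, not part of the original source. ---
# Applications normally create their own sub-directory under the returned path;
# the application name below is hypothetical.
_config_dir = os.path.join(appdatadirectory(), 'myapp')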
|
python
|
{
"resource": ""
}
|
q9995
|
get_objects
|
train
|
def get_objects(remove_dups=True, include_frames=False):
"""Return a list of all known objects excluding frame objects.
If (outer) frame objects shall be included, pass `include_frames=True`. In
order to prevent building reference cycles, the current frame object (of
the caller of get_objects) is ignored. This will not prevent creating
reference cycles if the object list is passed up the call-stack. Therefore,
frame objects are not included by default.
Keyword arguments:
remove_dups -- if True, all duplicate objects will be removed.
include_frames -- if True, includes frame objects.
"""
gc.collect()
# Do not initialize local variables before calling gc.get_objects or those
# will be included in the list. Furthermore, ignore frame objects to
# prevent reference cycles.
tmp = gc.get_objects()
tmp = [o for o in tmp if not isframe(o)]
res = []
for o in tmp:
# gc.get_objects returns only container objects, but we also want
# the objects referenced by them
refs = get_referents(o)
for ref in refs:
if not _is_containerobject(ref):
# we already got the container objects, now we only add
# non-container objects
res.append(ref)
res.extend(tmp)
if remove_dups:
res = _remove_duplicates(res)
if include_frames:
for sf in stack()[2:]:
res.append(sf[0])
return res
|
python
|
{
"resource": ""
}
|
q9996
|
get_size
|
train
|
def get_size(objects):
"""Compute the total size of all elements in objects."""
res = 0
for o in objects:
try:
res += _getsizeof(o)
except AttributeError:
print("IGNORING: type=%s; o=%s" % (str(type(o)), str(o)))
return res
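# --- Illustrative usage sketch, not part of the original source. ---
# Typical muppy-style measurement: grab every object known to the GC and total
# their sizes. This can be slow and the figure varies between interpreter builds.
_all_objects = get_objects()
_heap_bytes = get_size(_all_objects)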
|
python
|
{
"resource": ""
}
|
q9997
|
get_diff
|
train
|
def get_diff(left, right):
"""Get the difference of both lists.
The result will be a dict with this form {'+': [], '-': []}.
Items listed in '+' exist only in the right list,
items listed in '-' exist only in the left list.
"""
res = {'+': [], '-': []}
def partition(objects):
"""Partition the passed object list."""
res = {}
for o in objects:
t = type(o)
if type(o) not in res:
res[t] = []
res[t].append(o)
return res
def get_not_included(foo, bar):
"""Compare objects from foo with objects defined in the values of
bar (set of partitions).
Returns a list of all objects included in list, but not dict values.
"""
res = []
for o in foo:
if not compat.object_in_list(type(o), bar):
res.append(o)
elif not compat.object_in_list(o, bar[type(o)]):
res.append(o)
return res
# Create partitions of both lists. This will reduce the time required for
# the comparison
left_objects = partition(left)
right_objects = partition(right)
# and then do the diff
res['+'] = get_not_included(right, left_objects)
res['-'] = get_not_included(left, right_objects)
return res
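# --- Illustrative usage sketch, not part of the original source (assumes the
# module's ``compat`` helper used above is importable). ---
# Hypothetical before/after object lists: items present only in the right list
# are reported under '+', items present only in the left list under '-'.
_before = [1, 2, 'spam']
_after = [2, 'spam', 'eggs', 3.14]
_changes = get_diff(_before, _after)
# Expected: _changes == {'+': ['eggs', 3.14], '-': [1]}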
|
python
|
{
"resource": ""
}
|
q9998
|
filter
|
train
|
def filter(objects, Type=None, min=-1, max=-1): #PYCHOK muppy filter
"""Filter objects.
The filter can be by type, minimum size, and/or maximum size.
Keyword arguments:
Type -- object type to filter by
min -- minimum object size
max -- maximum object size
"""
    res = list(objects)
    if -1 < max < min:
        raise ValueError("minimum must be smaller than maximum")
    if Type is not None:
        res = [o for o in res if isinstance(o, Type)]
    if min > -1:
        # keep objects that are at least `min` bytes
        res = [o for o in res if _getsizeof(o) >= min]
    if max > -1:
        # keep objects that are at most `max` bytes
        res = [o for o in res if _getsizeof(o) <= max]
    return res
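# --- Illustrative usage sketch, not part of the original source. ---
# Keep only str objects of at most 256 bytes; actual per-object sizes depend on
# the interpreter build, so the surviving set may differ between platforms.
_candidates = ['x', 'a much longer string', 42, 3.14]
_small_strings = filter(_candidates, Type=str, max=256)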
|
python
|
{
"resource": ""
}
|
q9999
|
get_referents
|
train
|
def get_referents(object, level=1):
"""Get all referents of an object up to a certain level.
The referents will not be returned in a specific order and
will not contain duplicate objects. Duplicate objects will be removed.
Keyword arguments:
    level -- level of indirection up to which referents are considered.
This function is recursive.
"""
res = gc.get_referents(object)
level -= 1
if level > 0:
for o in res:
res.extend(get_referents(o, level))
res = _remove_duplicates(res)
return res
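# --- Illustrative usage sketch, not part of the original source. ---
# With level=1 only the direct referents of the outer list are returned; with
# level=2 the contents of the nested inner list are reached as well.
_nested = [[1, 2], 'tail']
_direct = get_referents(_nested, level=1)   # the inner list and 'tail'
_deeper = get_referents(_nested, level=2)   # additionally the ints 1 and 2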
|
python
|
{
"resource": ""
}
|