code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
# ignore this and the caller frame
ignore.append(inspect.currentframe())  # PYCHOK change ignore
self.o1 = self._get_objects(ignore)
diff = muppy.get_diff(self.o0, self.o1)
self.o0 = self.o1
# manual cleanup, see comment above
del ignore[:]  # PYCHOK change ignore
return diff
def get_diff(self, ignore=[])
Get the diff to the last time the state of objects was measured.

keyword arguments
ignore -- list of objects to ignore
8.846794
8.758439
1.010088
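A hedged usage sketch for the tracker methods in this record and the next. The object-level get_diff/print_diff above mutate an `ignore` list of frames; the closest public entry point I know of is pympler's summary-level sibling, SummaryTracker (assumed import layout):

    from pympler import tracker

    tr = tracker.SummaryTracker()    # snapshots a summary of all live objects
    leak = [dict() for _ in range(100)]
    tr.print_diff()                  # prints objects added/removed since the snapshot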
# ignore this and the caller frame
ignore.append(inspect.currentframe())  # PYCHOK change ignore
diff = self.get_diff(ignore)
print("Added objects:")
summary.print_(summary.summarize(diff['+']))
print("Removed objects:")
summary.print_(summary.summarize(diff['-']))
# manual cleanup, see comment above
del ignore[:]
def print_diff(self, ignore=[])
Print the diff to the last time the state of objects was measured.

keyword arguments
ignore -- list of objects to ignore
9.362415
9.569201
0.978391
L = len(seq1)
if L != len(seq2):
    raise ValueError("expected two strings of the same length")
if L == 0:
    return 0.0 if normalized else 0  # equal
dist = sum(c1 != c2 for c1, c2 in zip(seq1, seq2))
if normalized:
    return dist / float(L)
return dist
def hamming(seq1, seq2, normalized=False)
Compute the Hamming distance between the two sequences `seq1` and `seq2`.
The Hamming distance is the number of differing items in two ordered
sequences of the same length. If the sequences submitted do not have the
same length, an error will be raised.

If `normalized` evaluates to `False`, the return value will be an integer
between 0 and the length of the sequences provided, edge values included;
otherwise, it will be a float between 0 and 1 included, where 0 means
equal, and 1 totally different. The normalized Hamming distance is
computed as:

    0.0                         if len(seq1) == 0
    hamming_dist / len(seq1)    otherwise
2.519575
2.572893
0.979277
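A quick check of the contract above, assuming the hamming function from this record is in scope (it matches the distance package's version):

    assert hamming("1011101", "1001001") == 2
    assert hamming("karolin", "kathrin", normalized=True) == 3 / 7.0
    try:
        hamming("abc", "ab")
    except ValueError as err:
        print(err)   # expected two strings of the same length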
set1, set2 = set(seq1), set(seq2)
return 1 - len(set1 & set2) / float(len(set1 | set2))
def jaccard(seq1, seq2)
Compute the Jaccard distance between the two sequences `seq1` and `seq2`. They should contain hashable items. The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
2.21085
2.415358
0.91533
set1, set2 = set(seq1), set(seq2)
return 1 - (2 * len(set1 & set2) / float(len(set1) + len(set2)))
def sorensen(seq1, seq2)
Compute the Sorensen distance between the two sequences `seq1` and `seq2`. They should contain hashable items. The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
2.335656
2.475783
0.943401
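The jaccard and sorensen records above both compare item sets, and their similarities are algebraically linked: with intersection I and union U, Jaccard similarity is I/U while Sorensen-Dice similarity is 2I/(U+I), i.e. Dice = 2J/(1+J). A minimal sketch verifying that, assuming both functions above are in scope:

    s1, s2 = "decide", "resize"
    dj = jaccard(s1, s2)    # 1 - |A&B| / |A|B|
    ds = sorensen(s1, s2)   # 1 - 2|A&B| / (|A| + |B|)
    J = 1 - dj              # back to similarities
    assert abs((1 - ds) - 2 * J / (1 + J)) < 1e-9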
# requires: from array import array
L1, L2 = len(seq1), len(seq2)
ms = []
mlen = last = 0
if L1 < L2:
    seq1, seq2 = seq2, seq1
    L1, L2 = L2, L1
column = array('L', range(L2))
for i in range(L1):
    for j in range(L2):
        old = column[j]
        if seq1[i] == seq2[j]:
            if i == 0 or j == 0:
                column[j] = 1
            else:
                column[j] = last + 1
            if column[j] > mlen:
                mlen = column[j]
                ms = [(i, j)]
            elif column[j] == mlen:
                ms.append((i, j))
        else:
            column[j] = 0
        last = old
if positions:
    return (mlen, tuple((i - mlen + 1, j - mlen + 1) for i, j in ms if ms))
return set(seq1[i - mlen + 1:i + 1] for i, _ in ms if ms)
def lcsubstrings(seq1, seq2, positions=False)
Find the longest common substring(s) in the sequences `seq1` and `seq2`.

If `positions` evaluates to `True`, only their positions will be returned,
together with their length, in a tuple:

    (length, [(start pos in seq1, start pos in seq2)..])

Otherwise, the substrings themselves will be returned, in a set.

Example:

    >>> lcsubstrings("sedentar", "dentist")
    {'dent'}
    >>> lcsubstrings("sedentar", "dentist", positions=True)
    (4, [(2, 0)])
2.119659
2.101343
1.008716
if self.color_mapping is None:
    self.color_mapping = {}
color = self.color_mapping.get(node.key)
if color is None:
    depth = len(self.color_mapping)
    red = (depth * 10) % 255
    green = 200 - ((depth * 5) % 200)
    blue = (depth * 25) % 200
    self.color_mapping[node.key] = color = wx.Colour(red, green, blue)
return color
def background_color(self, node, depth)
Create a (unique-ish) background color for each node
2.375879
2.303797
1.031288
self.percentageView = percent
self.total = total
def SetPercentage(self, percent, total)
Set whether to display percentage values (and total for doing so)
17.335672
10.139933
1.709644
'''
Run a function profiler and show it in a GUI visualisation using RunSnakeRun

Note: can also use calibration for more exact results
'''
functionprofiler.runprofile(funcname + '(\'' + argv + '\')', profilepath,
                            *args, **kwargs)
print('Showing profile (windows should open in the background)')
sys.stdout.flush()
functionprofiler.browseprofilegui(profilepath)
def runprofilerandshow(funcname, profilepath, argv='', *args, **kwargs)
Run a function profiler and show it in a GUI visualisation using RunSnakeRun. Note: can also use calibration for more exact results
15.978633
5.175519
3.087349
'''
Makes a call graph

Note: be sure to install GraphViz prior to printing the dot graph!
'''
import pycallgraph

@functools.wraps(func)
def wrapper(*args, **kwargs):
    pycallgraph.start_trace()
    result = func(*args, **kwargs)  # keep the wrapped function's return value
    pycallgraph.save_dot('callgraph.log')
    pycallgraph.make_dot_graph('callgraph.png')
    #pycallgraph.make_dot_graph('callgraph.jpg', format='jpg', tool='neato')
    return result
return wrapper
def callgraph(func)
Makes a call graph. Note: be sure to install GraphViz prior to printing the dot graph!
4.387445
3.011791
1.456756
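A usage sketch for the callgraph decorator above; the start_trace/save_dot/make_dot_graph calls follow the legacy pycallgraph 0.x API the snippet itself uses, so this assumes that old version is installed:

    @callgraph
    def work():
        # hypothetical workload to trace
        return sum(range(100))

    work()   # writes callgraph.log and callgraph.png next to the script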
# Python 2 code: 'L' long literals and str used as a byte string.
def _long2bytes(n, blocksize=0):
    # After much testing, this algorithm was deemed to be the fastest.
    s = ''
    pack = struct.pack
    while n > 0:
        ### CHANGED FROM '>I' TO '<I'. (DCG)
        s = pack('<I', n & 0xffffffffL) + s
        ### --------------------------
        n = n >> 32
    # Strip off leading zeros.
    for i in range(len(s)):
        if s[i] != '\000':  # '<>' replaced by the equivalent '!='
            break
    else:
        # Only happens when n == 0.
        s = '\000'
        i = 0
    s = s[i:]
    # Add back some pad bytes. This could be done more efficiently
    # w.r.t. the de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * '\000' + s
    return s
Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize.
null
null
null
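On Python 3 the standard big-endian form of this conversion (what the '>I' version produced before the noted '<I' change swapped byte order within each 32-bit word) is a near one-liner with int.to_bytes. A sketch, not part of the original module:

    def long2bytes_py3(n, blocksize=0):
        # big-endian, minimal length, front-padded with NULs to a
        # multiple of blocksize if requested
        s = n.to_bytes(max(1, (n.bit_length() + 7) // 8), 'big')
        if blocksize > 0 and len(s) % blocksize:
            s = b'\x00' * (blocksize - len(s) % blocksize) + s
        return s

    assert long2bytes_py3(0) == b'\x00'
    assert long2bytes_py3(0xDEADBEEF, 8) == b'\x00\x00\x00\x00\xde\xad\xbe\xef'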
def XX(func, a, b, c, d, x, s, ac):
    res = 0L
    res = res + a + func(b, c, d)
    res = res + x
    res = res + ac
    res = res & 0xffffffffL
    res = _rotateLeft(res, s)
    res = res & 0xffffffffL
    res = res + b
    return res & 0xffffffffL
Wrapper for call distribution to functions F, G, H and I. This replaces functions FF, GG, HH and II from "Appl. Crypto." Rotation is separate from addition to prevent recomputation (now summed-up in one function).
null
null
null
def init(self):
    "Initialize the message-digest and set all fields to zero."
    self.length = 0L
    self.input = []
    # Load magic initialization constants.
    self.A = 0x67452301L
    self.B = 0xefcdab89L
    self.C = 0x98badcfeL
    self.D = 0x10325476L
Initialize the message-digest and set all fields to zero.
null
null
null
def update(self, inBuf):
    leninBuf = long(len(inBuf))
    # Compute number of bytes mod 64.
    index = (self.count[0] >> 3) & 0x3FL
    # Update number of bits.
    self.count[0] = self.count[0] + (leninBuf << 3)
    if self.count[0] < (leninBuf << 3):
        self.count[1] = self.count[1] + 1
    self.count[1] = self.count[1] + (leninBuf >> 29)
    partLen = 64 - index
    if leninBuf >= partLen:
        self.input[index:] = map(None, inBuf[:partLen])
        self._transform(_bytelist2long(self.input))
        i = partLen
        while i + 63 < leninBuf:
            self._transform(_bytelist2long(map(None, inBuf[i:i + 64])))
            i = i + 64
        else:
            self.input = map(None, inBuf[i:leninBuf])
    else:
        i = 0
        self.input = self.input + map(None, inBuf)
Add to the current message. Update the md5 object with the string arg. Repeated calls are equivalent to a single call with the concatenation of all the arguments, i.e. m.update(a); m.update(b) is equivalent to m.update(a+b).
null
null
null
def digest(self):
    A = self.A
    B = self.B
    C = self.C
    D = self.D
    input = [] + self.input
    count = [] + self.count
    index = (self.count[0] >> 3) & 0x3fL
    if index < 56:
        padLen = 56 - index
    else:
        padLen = 120 - index
    padding = ['\200'] + ['\000'] * 63
    self.update(padding[:padLen])
    # Append length (before padding).
    bits = _bytelist2long(self.input[:56]) + count
    self._transform(bits)
    # Store state in digest.
    digest = _long2bytes(self.A << 96, 16)[:4] + \
             _long2bytes(self.B << 64, 16)[4:8] + \
             _long2bytes(self.C << 32, 16)[8:12] + \
             _long2bytes(self.D, 16)[12:]
    self.A = A
    self.B = B
    self.C = C
    self.D = D
    self.input = input
    self.count = count
    return digest
Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes.
null
null
null
def hexdigest(self):
    d = map(None, self.digest())
    d = map(ord, d)
    d = map(lambda x: "%02x" % x, d)
    d = string.join(d, '')
    return d
Terminate and return digest in HEX form. Like digest() except the digest is returned as a string of length 32, containing only hexadecimal digits. This may be used to exchange the value safely in email or other non-binary environments.
null
null
null
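The init/update/digest/hexdigest records above mirror the interface of Python's built-in hashlib; outside of study purposes, the stdlib version is the one to use:

    import hashlib

    m = hashlib.md5()
    m.update(b'abc')      # repeated calls concatenate, as documented above
    m.update(b'def')
    assert m.hexdigest() == hashlib.md5(b'abcdef').hexdigest()
    print(m.hexdigest())  # 32 hex digits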
# this is the *weighted* size/contribution of the node
try:
    return node['contribution']
except KeyError as err:
    contribution = int(node.get('totsize', 0) /
                       float(len(node.get('parents', ())) or 1))
    node['contribution'] = contribution
    return contribution
def value( self, node, parent=None )
Return value used to compare size of this node
10.741731
10.23534
1.049475
result = []
if node.get('type'):
    result.append(node['type'])
if node.get('name'):
    result.append(node['name'])
elif node.get('value') is not None:
    result.append(unicode(node['value'])[:32])
if 'module' in node and not node['module'] in result:
    result.append(' in %s' % (node['module']))
if node.get('size'):
    result.append('%s' % (mb(node['size'])))
if node.get('totsize'):
    result.append('(%s)' % (mb(node['totsize'])))
parent_count = len(node.get('parents', ()))
if parent_count > 1:
    result.append('/%s refs' % (parent_count))
return " ".join(result)
def label( self, node )
Return textual description of this node
3.256465
3.146892
1.03482
if 'index' in node:
    index = node['index']()
    parents = list(meliaeloader.children(node, index, 'parents'))
    return parents
return []
def parents( self, node )
Retrieve/calculate the set of parents for the given node
16.010464
16.121777
0.993095
parents = self.parents(node)
selected_parent = None
if node['type'] == 'type':
    module = ".".join(node['name'].split('.')[:-1])
    if module:
        for mod in parents:
            if mod['type'] == 'module' and mod['name'] == module:
                selected_parent = mod
if parents and selected_parent is None:
    parents.sort(key=lambda x: self.value(node, x))
    return parents[-1]
return selected_parent
def best_parent( self, node, tree_type=None )
Choose the best parent for a given node
3.727097
3.625214
1.028104
'''
Decorator for client code's main function.
Serializes argparse data to JSON for use with the Gooey front end
'''
params = locals()

def build(payload):
    def run_gooey(self, args=None, namespace=None):
        source_path = sys.argv[0]
        build_spec = config_generator.create_from_parser(
            self, source_path, payload_name=payload.__name__, **params)
        if dump_build_config:
            config_path = os.path.join(os.getcwd(), 'gooey_config.json')
            print('Writing Build Config to: {}'.format(config_path))
            with open(config_path, 'w') as f:
                f.write(json.dumps(build_spec, indent=2))
        application.run(build_spec)

    def inner2(*args, **kwargs):
        ArgumentParser.original_parse_args = ArgumentParser.parse_args
        ArgumentParser.parse_args = run_gooey
        return payload(*args, **kwargs)

    inner2.__name__ = payload.__name__
    return inner2

def run_without_gooey(func):
    return lambda: func()

if IGNORE_COMMAND in sys.argv:
    sys.argv.remove(IGNORE_COMMAND)
    if callable(f):
        return run_without_gooey(f)
    return run_without_gooey

if callable(f):
    return build(f)
return build
def Gooey(f=None, advanced=True, language='english', show_config=True, program_name=None, program_description=None, default_size=(610, 530), required_cols=2, optional_cols=2, dump_build_config=False, monospace_display=False)
Decorator for client code's main function. Serializes argparse data to JSON for use with the Gooey front end
3.89424
3.248074
1.198938
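A minimal usage sketch for the Gooey decorator above, assuming the standard argparse flow it wraps and the package layout from the Gooey project (program name here is illustrative):

    from argparse import ArgumentParser
    from gooey import Gooey

    @Gooey(program_name='Demo')
    def main():
        parser = ArgumentParser(description='Example Gooey app')
        parser.add_argument('filename', help='file to process')
        args = parser.parse_args()   # intercepted by run_gooey at this point
        print(args.filename)

    if __name__ == '__main__':
        main()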
'''
:param widget_list: list of dicts containing widget info (name, type, etc..)
:return: ComponentList

Converts the Json widget information into concrete wx Widget types
'''
required_args, optional_args = partition(widget_list, is_required)
checkbox_args, general_args = partition(map(build_widget, optional_args),
                                        is_checkbox)
required_args = map(build_widget, required_args)
optional_args = general_args + checkbox_args
return ComponentList(required_args, optional_args)
def build_components(widget_list)
:param widget_list: list of dicts containing widget info (name, type, etc..)
:return: ComponentList

Converts the Json widget information into concrete wx Widget types
6.18144
2.889279
2.13944
ref2key = lambda ref: ref.name.split(':')[0]
base.size += other.size
base.flat += other.flat
if level > 0:
    base.name = ref2key(base)
# Add refs from other to base. Any new refs are appended.
base.refs = list(base.refs)  # we may need to append items
refs = {}
for ref in base.refs:
    refs[ref2key(ref)] = ref
for ref in other.refs:
    key = ref2key(ref)
    if key in refs:
        _merge_asized(refs[key], ref, level=level + 1)
    else:
        # Don't modify existing Asized instances => deepcopy
        base.refs.append(deepcopy(ref))
        base.refs[-1].name = key
def _merge_asized(base, other, level=0)
Merge **Asized** instances `base` and `other` into `base`.
4.282745
4.113398
1.04117
size = None
for (timestamp, tsize) in obj.snapshots:
    if timestamp == tref:
        size = tsize
if size:
    _merge_asized(merged, size)
def _merge_objects(tref, merged, obj)
Merge the snapshot size information of multiple tracked objects. The tracked object `obj` is scanned for size information at time `tref`. The sizes are merged into **Asized** instance `merged`.
9.156248
5.080401
1.802269
lines = []
for fname, lineno, func, src, _ in trace:
    if src:
        for line in src:
            lines.append(' ' + line.strip() + '\n')
    lines.append(' %s:%4d in %s\n' % (fname, lineno, func))
return ''.join(lines)
def _format_trace(trace)
Convert the (stripped) stack-trace to a nice readable format. The stack trace `trace` is a list of frame records as returned by **inspect.stack** but without the frame objects. Returns a string.
3.545905
3.677179
0.9643
if isinstance(fdump, type('')):
    fdump = open(fdump, 'rb')
self.index = pickle.load(fdump)
self.snapshots = pickle.load(fdump)
self.sorted = []
def load_stats(self, fdump)
Load the data from a dump file. The argument `fdump` can be either a filename or an open file object that requires read access.
3.933928
3.559399
1.105222
if self.tracker:
    self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
    fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
    fdump.close()
def dump_stats(self, fdump, close=True)
Dump the logged data to a file. The argument `fdump` can be either a filename or an open file object that requires write access. `close` controls if the file is closed before leaving this method (the default behaviour).
2.928074
2.929214
0.999611
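dump_stats and load_stats above are symmetric, so a dump can be reloaded into a fresh Stats object. A hedged round-trip sketch, assuming pympler's ClassTracker/ConsoleStats API (module paths as in pympler; the tracked class and filename are illustrative):

    from pympler.classtracker import ClassTracker
    from pympler.classtracker_stats import ConsoleStats

    tr = ClassTracker()
    tr.track_class(dict)                 # hypothetical class to watch
    tr.create_snapshot()
    tr.stats.dump_stats('profile.pdump')

    stats = ConsoleStats()               # fresh Stats object
    stats.load_stats('profile.pdump')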
if not self.sorted:
    # Identify the snapshot that tracked the largest amount of memory.
    tmax = None
    maxsize = 0
    for snapshot in self.snapshots:
        if snapshot.tracked_total > maxsize:
            tmax = snapshot.timestamp
            maxsize = snapshot.tracked_total  # keep the running maximum
    for key in list(self.index.keys()):
        for tobj in self.index[key]:
            tobj.classname = key
            tobj.size = tobj.get_max_size()
            tobj.tsize = tobj.get_size_at_time(tmax)
        self.sorted.extend(self.index[key])
def _init_sort(self)
Prepare the data to be sorted. If not yet sorted, import all tracked objects from the tracked index. Extend the tracking information by implicit information to make sorting easier (DSU pattern).
5.063382
4.456531
1.136171
criteria = ('classname', 'tsize', 'birth', 'death',
            'name', 'repr', 'size')

if not set(criteria).issuperset(set(args)):
    raise ValueError("Invalid sort criteria")

if not args:
    args = criteria

def args_to_tuple(obj):
    keys = []
    for attr in args:
        attribute = getattr(obj, attr)
        if attr in ('tsize', 'size'):
            attribute = -attribute
        keys.append(attribute)
    return tuple(keys)

self._init_sort()
self.sorted.sort(key=args_to_tuple)
return self
def sort_stats(self, *args)
Sort the tracked objects according to the supplied criteria. The argument
is a string identifying the basis of a sort (example: 'size' or
'classname'). When more than one key is provided, then additional keys are
used as secondary criteria when there is equality in all keys selected
before them. For example, ``sort_stats('name', 'size')`` will sort all the
entries according to their class name, and resolve all ties (identical
class names) by sorting by size. The criteria are fields in the tracked
object instances. Results are stored in the ``self.sorted`` list which is
used by ``Stats.print_stats()`` and other methods. The fields available
for sorting are:

    'classname' : the name with which the class was registered
    'name'      : the classname
    'birth'     : creation timestamp
    'death'     : destruction timestamp
    'size'      : the maximum measured size of the object
    'tsize'     : the measured size during the largest snapshot
    'repr'      : string representation of the object

Note that sorts on size are in descending order (placing most memory
consuming items first), whereas name, repr, and creation time searches are
in ascending order (alphabetical).

The function returns self to allow calling functions on the result::

    stats.sort_stats('size').reverse_order().print_stats()
4.622328
3.413953
1.353952
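Continuing the round-trip sketch a few records up: per the docstring above, sort keys chain and the method returns self, so calls compose directly:

    # sizes descending, ties broken by class name, then print the top 10%
    stats.sort_stats('size', 'classname').print_stats(limit=0.1)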
if hasattr(snapshot, 'classes'):
    return

snapshot.classes = {}

for classname in list(self.index.keys()):
    total = 0
    active = 0
    merged = Asized(0, 0)
    for tobj in self.index[classname]:
        _merge_objects(snapshot.timestamp, merged, tobj)
        total += tobj.get_size_at_time(snapshot.timestamp)
        if tobj.birth < snapshot.timestamp and \
                (tobj.death is None or tobj.death > snapshot.timestamp):
            active += 1

    try:
        pct = total * 100.0 / snapshot.total
    except ZeroDivisionError:  # pragma: no cover
        pct = 0
    try:
        avg = total / active
    except ZeroDivisionError:
        avg = 0

    snapshot.classes[classname] = dict(sum=total, avg=avg, pct=pct,
                                       active=active)
    snapshot.classes[classname]['merged'] = merged
def annotate_snapshot(self, snapshot)
Store additional statistical data in snapshot.
3.773871
3.692313
1.022089
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
for ref in lrefs:
    if ref.size > minsize and (ref.size * 100.0 / total) > minpct:
        self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
            trunc(prefix + str(ref.name), 50),
            pp(ref.size),
            int(ref.size * 100.0 / total),
            level
        ))
        self._print_refs(ref.refs, total, prefix=prefix + ' ',
                         level=level + 1)
def _print_refs(self, refs, total, prefix=' ', level=1, minsize=0, minpct=0.1)
Print individual referents recursively.
2.794064
2.836583
0.98501
if tobj.death:
    self.stream.write('%-32s ( free )   %-35s\n' % (
        trunc(tobj.name, 32, left=1), trunc(tobj.repr, 35)))
else:
    self.stream.write('%-32s 0x%08x %-35s\n' % (
        trunc(tobj.name, 32, left=1), tobj.id, trunc(tobj.repr, 35)))
if tobj.trace:
    self.stream.write(_format_trace(tobj.trace))
for (timestamp, size) in tobj.snapshots:
    self.stream.write('  %-30s %s\n' % (
        pp_timestamp(timestamp), pp(size.size)))
    self._print_refs(size.refs, size.size)
if tobj.death is not None:
    self.stream.write('  %-30s finalize\n' % (
        pp_timestamp(tobj.death),))
def print_object(self, tobj)
Print the gathered information of object `tobj` in human-readable format.
3.710185
3.696183
1.003788
if self.tracker:
    self.tracker.stop_periodic_snapshots()

if not self.sorted:
    self.sort_stats()

_sorted = self.sorted

if clsname:
    _sorted = [to for to in _sorted if clsname in to.classname]

if limit < 1.0:
    limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]

# Emit per-instance data
for tobj in _sorted:
    self.print_object(tobj)
def print_stats(self, clsname=None, limit=1.0)
Write tracked objects to stdout. The output can be filtered and pruned.
Only objects are printed whose classname contain the substring supplied by
the `clsname` argument. The output can be pruned by passing a `limit`
value.

:param clsname: Only print objects whose classname contain the given
    substring.
:param limit: If `limit` is a float smaller than one, only the supplied
    percentage of the total tracked data is printed. If `limit` is bigger
    than one, this number of tracked objects are printed. Tracked objects
    are first filtered, and then pruned (if specified).
4.494156
4.550084
0.987708
# Emit class summaries for each snapshot
classlist = self.tracked_classes

fobj = self.stream

fobj.write('---- SUMMARY ' + '-' * 66 + '\n')
for snapshot in self.snapshots:
    self.annotate_snapshot(snapshot)
    fobj.write('%-35s %11s %12s %12s %5s\n' % (
        trunc(snapshot.desc, 35),
        'active',
        pp(snapshot.asizeof_total),
        'average',
        'pct'
    ))
    for classname in classlist:
        info = snapshot.classes.get(classname)
        fobj.write('  %-33s %11d %12s %12s %4d%%\n' % (
            trunc(classname, 33),
            info['active'],
            pp(info['sum']),
            pp(info['avg']),
            info['pct']
        ))
fobj.write('-' * 79 + '\n')
def print_summary(self)
Print per-class summary for each snapshot.
4.865249
4.568642
1.064922
lrefs = list(refs)
lrefs.sort(key=lambda x: x.size)
lrefs.reverse()
if level == 1:
    fobj.write('<table>\n')
for ref in lrefs:
    if ref.size > minsize and (ref.size * 100.0 / total) > minpct:
        data = dict(level=level,
                    name=trunc(str(ref.name), 128),
                    size=pp(ref.size),
                    pct=ref.size * 100.0 / total)
        fobj.write(self.refrow % data)
        self._print_refs(fobj, ref.refs, total, level=level + 1)
if level == 1:
    fobj.write("</table>\n")
def _print_refs(self, fobj, refs, total, level=1, minsize=0, minpct=0.1)
Print individual referents recursively.
2.538782
2.531168
1.003008
fobj = open(fname, "w")
fobj.write(self.header % (classname, self.style))

fobj.write("<h1>%s</h1>\n" % (classname))

sizes = [tobj.get_max_size() for tobj in self.index[classname]]
total = 0
for s in sizes:
    total += s
data = {'cnt': len(self.index[classname]), 'cls': classname}
data['avg'] = pp(total / len(sizes))
data['max'] = pp(max(sizes))
data['min'] = pp(min(sizes))
fobj.write(self.class_summary % data)

fobj.write(self.charts[classname])

fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
for snapshot in self.snapshots:
    if classname in snapshot.classes:
        merged = snapshot.classes[classname]['merged']
        fobj.write(self.class_snapshot % {
            'name': snapshot.desc, 'cls': classname,
            'total': pp(merged.size)
        })
        if merged.refs:
            self._print_refs(fobj, merged.refs, merged.size)
        else:
            fobj.write('<p>No per-referent sizes recorded.</p>\n')

fobj.write("<h2>Instances</h2>\n")
for tobj in self.index[classname]:
    fobj.write('<table id="tl" width="100%" rules="rows">\n')
    fobj.write('<tr><td id="hl" width="140px">Instance</td>'
               '<td id="hl">%s at 0x%08x</td></tr>\n' % (tobj.name, tobj.id))
    if tobj.repr:
        fobj.write("<tr><td>Representation</td><td>%s&nbsp;</td></tr>\n" %
                   tobj.repr)
    fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" %
               (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
    if tobj.trace:
        trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
        fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" % trace)
    for (timestamp, size) in tobj.snapshots:
        fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
        if not size.refs:
            fobj.write("<td>%s</td></tr>\n" % pp(size.size))
        else:
            fobj.write("<td>%s" % pp(size.size))
            self._print_refs(fobj, size.refs, size.size)
            fobj.write("</td></tr>\n")
    fobj.write("</table>\n")

fobj.write(self.footer)
fobj.close()
def print_class_details(self, fname, classname)
Print detailed statistics and instances for the class `classname`. All data will be written to the file `fname`.
2.753763
2.760032
0.997729
if basepath is None:
    basepath = self.basedir
if not basepath:
    return filepath
if filepath.startswith(basepath):
    rel = filepath[len(basepath):]
    if rel and rel[0] == os.sep:
        rel = rel[1:]
    return rel
def relative_path(self, filepath, basepath=None)
Convert the path `filepath` to a relative path against `basepath`. By default `basepath` is self.basedir.
2.467152
2.202737
1.120039
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))

fobj.write("<h1>%s</h1>\n" % title)

fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])

fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')

classlist = list(self.index.keys())
classlist.sort()

for snapshot in self.snapshots:
    fobj.write('<tr><td>\n')
    fobj.write('<table id="tl" rules="rows">\n')
    fobj.write("<h3>%s snapshot at %s</h3>\n" % (
        snapshot.desc or 'Untitled',
        pp_timestamp(snapshot.timestamp)
    ))

    data = {}
    data['sys'] = pp(snapshot.system_total.vsz)
    data['tracked'] = pp(snapshot.tracked_total)
    data['asizeof'] = pp(snapshot.asizeof_total)
    data['overhead'] = pp(getattr(snapshot, 'overhead', 0))

    fobj.write(self.snapshot_summary % data)

    if snapshot.tracked_total:
        fobj.write(self.snapshot_cls_header)
        for classname in classlist:
            data = snapshot.classes[classname].copy()
            data['cls'] = '<a href="%s">%s</a>' % (
                self.relative_path(self.links[classname]), classname)
            data['sum'] = pp(data['sum'])
            data['avg'] = pp(data['avg'])
            fobj.write(self.snapshot_cls % data)
    fobj.write('</table>')
    fobj.write('</td><td>\n')
    if snapshot.tracked_total:
        fobj.write(self.charts[snapshot])
    fobj.write('</td></tr>\n')

fobj.write("</table>\n")
fobj.write(self.footer)
fobj.close()
def create_title_page(self, filename, title='')
Output the title page.
3.027782
3.033465
0.998127
try:
    from pylab import figure, title, xlabel, ylabel, plot, savefig
except ImportError:
    return HtmlStats.nopylab_msg % (classname + " lifetime")

cnt = []
for tobj in self.index[classname]:
    cnt.append([tobj.birth, 1])
    if tobj.death:
        cnt.append([tobj.death, -1])
cnt.sort()
for i in range(1, len(cnt)):
    cnt[i][1] += cnt[i - 1][1]
    #if cnt[i][0] == cnt[i-1][0]:
    #    del cnt[i-1]

x = [t for [t, c] in cnt]
y = [c for [t, c] in cnt]

figure()
xlabel("Execution time [s]")
ylabel("Instance #")
title("%s instances" % classname)
plot(x, y, 'o')
savefig(filename)

return self.chart_tag % (os.path.basename(filename))
def create_lifetime_chart(self, classname, filename='')
Create chart that depicts the lifetime of the instance registered with `classname`. The output is written to `filename`.
3.435451
3.396087
1.011591
try:
    from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
    import matplotlib.mlab as mlab
except ImportError:
    return self.nopylab_msg % ("memory allocation")

classlist = self.tracked_classes

times = [snapshot.timestamp for snapshot in self.snapshots]
base = [0] * len(self.snapshots)
poly_labels = []
polys = []
for cn in classlist:
    pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
    if max(pct) > 3.0:
        sz = [float(fp.classes[cn]['sum']) / (1024 * 1024)
              for fp in self.snapshots]
        sz = [sx + sy for sx, sy in zip(base, sz)]
        xp, yp = mlab.poly_between(times, base, sz)
        polys.append(((xp, yp), {'label': cn}))
        poly_labels.append(cn)
        base = sz

figure()
title("Snapshot Memory")
xlabel("Execution Time [s]")
ylabel("Virtual Memory [MiB]")

sizes = [float(fp.asizeof_total) / (1024 * 1024) for fp in self.snapshots]
plot(times, sizes, 'r--', label='Total')
sizes = [float(fp.tracked_total) / (1024 * 1024) for fp in self.snapshots]
plot(times, sizes, 'b--', label='Tracked total')

for (args, kwds) in polys:
    fill(*args, **kwds)
legend(loc=2)
savefig(filename)

return self.chart_tag % (self.relative_path(filename))
def create_snapshot_chart(self, filename='')
Create chart that depicts the memory allocation over time apportioned to the tracked classes.
4.020032
3.776242
1.064559
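A note on the chart code just above: mlab.poly_between was removed from matplotlib (around 3.1), so the stacked-area idea now goes through fill_between. A hedged sketch of the same stacking with the current API (the data values here are hypothetical):

    import matplotlib.pyplot as plt

    times = [0.0, 1.0, 2.0]
    base = [0.0, 0.0, 0.0]
    sizes = [1.0, 2.5, 2.0]              # per-class MiB totals (made up)
    top = [b + s for b, s in zip(base, sizes)]
    plt.fill_between(times, base, top, label='SomeClass')
    plt.legend(loc=2)
    plt.savefig('snapshot.png')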
try:
    from pylab import figure, title, pie, axes, savefig
    from pylab import sum as pylab_sum
except ImportError:
    return self.nopylab_msg % ("pie_chart")

# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
    return ''

classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
    if v['pct'] > 3.0:
        classlist.append(k)
        sizelist.append(v['sum'])
sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
classlist.insert(0, 'Other')
#sizelist = [x*0.01 for x in sizelist]

figure(figsize=(8, 8))
axes([0.1, 0.1, 0.8, 0.8])
# set the title after the figure exists so it lands on this chart
title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
pie(sizelist, labels=classlist)
savefig(filename, dpi=50)

return self.chart_tag % (self.relative_path(filename))
def create_pie_chart(self, snapshot, filename='')
Create a pie chart that depicts the distribution of the allocated memory for a given `snapshot`. The chart is saved to `filename`.
4.63129
4.520814
1.024437
# Create a folder to store the charts and additional HTML files.
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] + '_files'
if not os.path.isdir(self.filesdir):
    os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}

# Annotate all snapshots in advance
self.annotate()

# Create charts. The tags to show the images are returned and stored in
# the self.charts dictionary. This allows to return alternative text if
# the chart creation framework is not available.
self.charts = {}
fn = os.path.join(self.filesdir, 'timespace.png')
self.charts['snapshots'] = self.create_snapshot_chart(fn)

for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
    fn = os.path.join(self.filesdir, 'fp%d.png' % (idx))
    self.charts[fp] = self.create_pie_chart(fp, fn)

for cn in list(self.index.keys()):
    fn = os.path.join(self.filesdir, cn.replace('.', '_') + '-lt.png')
    self.charts[cn] = self.create_lifetime_chart(cn, fn)

# Create HTML pages first for each class and then the index page.
for cn in list(self.index.keys()):
    fn = os.path.join(self.filesdir, cn.replace('.', '_') + '.html')
    self.links[cn] = fn
    self.print_class_details(fn, cn)

self.create_title_page(fname, title=title)
def create_html(self, fname, title="ClassTracker Statistics")
Create HTML page `fname` and additional files in a directory derived from `fname`.
3.733995
3.676524
1.015632
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
listdir = parent_path._accessor.listdir
return self._select_from(parent_path, is_dir, exists, listdir)
def select_from(self, parent_path)
Iterate over all child paths of `parent_path` matched by this selector. This can contain parent_path itself.
4.079235
4.15204
0.982465
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
#   Path('c:/').relative_to('c:')  gives Path('/')
#   Path('c:/').relative_to('/')   raise ValueError
if not other:
    raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
    abs_parts = [drv, root] + parts[1:]
else:
    abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
    to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
    to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
    formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
    raise ValueError("{!r} does not start with {!r}"
                     .format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '', abs_parts[n:])
def relative_to(self, *other)
Return the relative path to another path identified by the passed arguments. If the operation is not possible (because this is not a subpath of the other path), raise ValueError.
3.637555
3.3882
1.073595
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
    raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
    yield p
def glob(self, pattern)
Iterate over this subtree and yield all existing files (of any kind, including directories) matching the given pattern.
7.513981
7.642473
0.983187
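The glob record above matches the semantics of the stdlib pathlib; the public form:

    from pathlib import Path

    for p in Path('.').glob('*.py'):   # relative patterns only
        print(p)

    # non-relative patterns raise, matching the snippet above:
    # Path('.').glob('/etc/*')  ->  NotImplementedError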
if not isinstance(data, six.binary_type):
    raise TypeError(
        'data must be %s, not %s' %
        (six.binary_type.__name__, data.__class__.__name__))
with self.open(mode='wb') as f:
    return f.write(data)
def write_bytes(self, data)
Open the file in bytes mode, write to it, and close the file.
2.805686
2.498583
1.122911
if not isinstance(data, six.text_type):
    raise TypeError(
        'data must be %s, not %s' %
        (six.text_type.__name__, data.__class__.__name__))
with self.open(mode='w', encoding=encoding, errors=errors) as f:
    return f.write(data)
def write_text(self, data, encoding=None, errors=None)
Open the file in text mode, write to it, and close the file.
2.397429
2.281594
1.050769
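These two records mirror pathlib's write_bytes/write_text; a quick stdlib usage sketch:

    from pathlib import Path

    p = Path('example.txt')
    p.write_text(u'h\u00e9llo', encoding='utf-8')   # returns chars written
    assert p.read_text(encoding='utf-8') == u'h\u00e9llo'
    p.write_bytes(b'\x00\x01')    # bytes mode; passing str here raises TypeError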
'''
Helper function to convert all paths to relative posix like paths
(to ease comparison)
'''
return recwalk_result[0], path2unix(
    os.path.join(os.path.relpath(recwalk_result[0], pardir),
                 recwalk_result[1]),
    nojoin=True, fromwinpath=fromwinpath)
def relpath_posix(recwalk_result, pardir, fromwinpath=False)
Helper function to convert all paths to relative posix like paths (to ease comparison)
5.278035
3.19172
1.653664
# Find the path that is the deepest, and count the number of parts
max_rec = max(len(x) if x else 0 for x in d.values())
# Pad other paths with empty parts to fill in, so that all paths will have
# the same number of parts (necessary to compare correctly, else deeper
# paths may get precedence over top ones, since the folder name will be
# compared to filenames!)
for key in d.keys():
    if d[key]:
        d[key] = [''] * (max_rec - len(d[key])) + d[key]
# Sort the dict relatively to the paths alphabetical order
d_sort = sorted(d.items(), key=lambda x: x[1])
return d_sort
def sort_dict_of_paths(d)
Sort a dict containing path parts (i.e., paths divided into parts and stored as a list). Top paths will be given precedence over deeper paths.
7.229216
6.344002
1.139536
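A worked example of the padding trick in sort_dict_of_paths above, assuming paths arrive pre-split into parts (values here are made up):

    d = {
        'a': ['dir', 'sub', 'file.txt'],   # deeper path (3 parts)
        'b': ['file.txt'],                 # top-level path (1 part)
    }
    # After left-padding 'b' to ['', '', 'file.txt'], the empty leading
    # parts sort before any folder name, so top paths get precedence:
    print(sort_dict_of_paths(d))
    # -> [('b', ['', '', 'file.txt']), ('a', ['dir', 'sub', 'file.txt'])]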
'''
Sort a dictionary of relative paths and cluster equal paths together
at the same time
'''
# First, sort the paths in order (this must be a couple: (parent_dir,
# filename), so that there's no ambiguity because else a file at root will
# be considered as being after a folder/file since the ordering is done
# alphabetically without any notion of tree structure).
d_sort = sort_dict_of_paths(d)

# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
    base_elt = d_sort.pop(0)

# No element, then we just return
if base_elt[1] is None:
    return None
# Else, we will now group equivalent files together (remember we are
# working on multiple directories, so we can have multiple equivalent
# relative filepaths, but of course the absolute filepaths are different).
else:
    # Init by creating the first group and pushing the first ordered
    # filepath into the first group
    lst = []
    lst.append([base_elt])
    if d_sort:
        # For each subsequent filepath
        for elt in d_sort:
            # If the filepath is not empty (generator died)
            if elt[1] is not None:
                # If the filepath is the same to the latest grouped
                # filepath, we add it to the same group
                if elt[1] == base_elt[1]:
                    lst[-1].append(elt)
                # Else the filepath is different: we create a new group,
                # add the filepath to this group, and replace the latest
                # grouped filepath
                else:
                    if return_only_first:
                        break  # break here if we only need the first group
                    lst.append([elt])
                    base_elt = elt  # replace the latest grouped filepath
    return lst
def sort_group(d, return_only_first=False)
Sort a dictionary of relative paths and cluster equal paths together at the same time
7.639977
6.803842
1.122892
self.ignore.append(inspect.currentframe())
return self._get_tree(self.root, self.maxdepth)
def get_tree(self)
Get a tree of referrers of the root object.
8.487645
6.003361
1.413815
self.ignore.append(inspect.currentframe())
res = _Node(root, self.str_func)  # PYCHOK use root parameter
self.already_included.add(id(root))  # PYCHOK use root parameter
if maxdepth == 0:
    return res
objects = gc.get_referrers(root)  # PYCHOK use root parameter
self.ignore.append(objects)
for o in objects:
    # XXX: find a better way to ignore dict of _Node objects
    if isinstance(o, dict):
        sampleNode = _Node(1)
        if list(sampleNode.__dict__.keys()) == list(o.keys()):
            continue
    _id = id(o)
    if not self.repeat and (_id in self.already_included):
        s = self.str_func(o)
        res.children.append("%s (already included, id %s)" % (s, _id))
        continue
    if (not isinstance(o, _Node)) and (o not in self.ignore):
        res.children.append(self._get_tree(o, maxdepth - 1))
return res
def _get_tree(self, root, maxdepth)
Workhorse of the get_tree implementation. This is a recursive method, which is why we have a wrapper method. root is the current root object of the tree which should be returned. Note that root is not of the type _Node. maxdepth defines how far down from the root the tree should be built.
4.669261
4.554174
1.025271
if tree is None:
    self._print(self.root, '', '')
else:
    self._print(tree, '', '')
def print_tree(self, tree=None)
Print referrers tree to console.

keyword arguments
tree -- if not None, the passed tree will be printed. Otherwise it is
        based on the root object.
3.880343
4.602819
0.843036
level = prefix.count(self.cross) + prefix.count(self.vline)
len_children = 0
if isinstance(tree, _Node):
    len_children = len(tree.children)

# add vertex
prefix += str(tree)
# and as many spaces as the vertex is long
carryon += self.space * len(str(tree))
if (level == self.maxdepth) or (not isinstance(tree, _Node)) or \
        (len_children == 0):
    self.stream.write(prefix + '\n')
    return
else:
    # add in between connections
    prefix += self.hline
    carryon += self.space
    # if there is more than one branch, add a cross
    if len(tree.children) > 1:
        prefix += self.cross
        carryon += self.vline
    prefix += self.hline
    carryon += self.space

    if len_children > 0:
        # print the first branch (on the same line)
        self._print(tree.children[0], prefix, carryon)
        for b in range(1, len_children):
            # the carryon becomes the prefix for all following children
            prefix = carryon[:-2] + self.cross + self.hline
            # remove the vlines for any children of last branch
            if b == (len_children - 1):
                carryon = carryon[:-2] + 2 * self.space
            self._print(tree.children[b], prefix, carryon)
            # leave a free line before the next branch
            if b == (len_children - 1):
                if len(carryon.strip(' ')) == 0:
                    return
                self.stream.write(carryon[:-2].rstrip() + '\n')
def _print(self, tree, prefix, carryon)
Compute and print a new line of the tree. This is a recursive function. arguments tree -- tree to print prefix -- prefix to the current line to print carryon -- prefix which is used to carry on the vertical lines
4.161903
4.131924
1.007256
old_stream = self.stream
self.stream = open(filename, 'w')
try:
    super(FileBrowser, self).print_tree(tree=tree)
finally:
    self.stream.close()
    self.stream = old_stream
def print_tree(self, filename, tree=None)
Print referrers tree to file (in text format).

keyword arguments
tree -- if not None, the passed tree will be printed.
2.782847
2.854657
0.974845
window = _Tkinter.Tk()
sc = _TreeWidget.ScrolledCanvas(window, bg="white",
                                highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = _ReferrerTreeItem(window, self.get_tree(), self)
node = _TreeNode(sc.canvas, None, item)
node.expand()
if standalone:
    window.mainloop()
def main(self, standalone=False)
Create interactive browser window.

keyword arguments
standalone -- Set to true if the browser is not attached to other windows
6.406559
7.469886
0.857652
'''
Returns str(option_string * DropDown Value)
e.g. -vvvvv
'''
dropdown_value = self.widget.GetValue()
if not str(dropdown_value).isdigit():
    return ''
arg = str(self.option_string).replace('-', '')
repeated_args = arg * int(dropdown_value)
return '-' + repeated_args
def getValue(self)
Returns str(option_string * DropDown Value) e.g. -vvvvv
11.742067
3.993992
2.939933
# in case the total is wrong (n is above the total), then
# we switch to the mode without showing the total prediction
# (since ETA would be wrong anyway)
if total and n > total:
    total = None

elapsed_str = format_interval(elapsed)

if elapsed:
    if unit_scale:
        rate = format_sizeof(n / elapsed, suffix='')
    else:
        rate = '{0:5.2f}'.format(n / elapsed)
else:
    rate = '?'
rate_unit = unit if unit else 'it'
if not unit:
    unit = ''

n_fmt = str(n)
total_fmt = str(total)
if unit_scale:
    n_fmt = format_sizeof(n, suffix='')
    if total:
        total_fmt = format_sizeof(total, suffix='')

if total:
    frac = n / total
    percentage = frac * 100

    remaining_str = format_interval(elapsed * (total - n) / n) if n else '?'

    l_bar = '{1}{0:.0f}%|'.format(percentage, prefix) if prefix else \
            '{0:3.0f}%|'.format(percentage)
    r_bar = '| {0}/{1}{2} [{3}<{4}, {5} {6}/s]'.format(
        n_fmt, total_fmt, unit, elapsed_str, remaining_str, rate, rate_unit)

    if ncols == 0:
        full_bar = ''  # no bar drawn; was left undefined in the original
    else:
        N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols else 10

        if ascii:
            bar_length, frac_bar_length = divmod(int(frac * N_BARS * 10), 10)
            bar = '#' * bar_length
            frac_bar = chr(48 + frac_bar_length) if frac_bar_length else ' '
        else:
            bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
            bar = _unich(0x2588) * bar_length
            frac_bar = _unich(0x2590 - frac_bar_length) \
                if frac_bar_length else ' '

        if bar_length < N_BARS:
            full_bar = bar + frac_bar + \
                ' ' * max(N_BARS - bar_length - 1, 0)  # spacing
        else:
            full_bar = bar + \
                ' ' * max(N_BARS - bar_length, 0)  # spacing

    return l_bar + full_bar + r_bar
else:
    # no progressbar nor ETA, just progress statistics
    return '{0}{1} [{2}, {3} {4}/s]'.format(
        n_fmt, unit, elapsed_str, rate, rate_unit)
def format_meter(n, total, elapsed, ncols=None, prefix='', unit=None, unit_scale=False, ascii=False)
Return a string-based progress bar given some parameters

Parameters
----------
n  : int
    Number of finished iterations.
total  : int
    The expected total number of iterations. If None, only basic progress
    statistics are displayed (no ETA).
elapsed  : float
    Number of seconds passed since start.
ncols  : int, optional
    The width of the entire output message. If specified, dynamically
    resizes the progress meter [default: None]. The fallback meter width
    is 10.
prefix  : str, optional
    Prefix message (included in total width).
unit  : str, optional
    String that will be used to define the unit of each iteration
    [default: "it"].
unit_scale  : bool, optional
    If set, the number of iterations will be reduced/scaled automatically
    and a metric prefix following the International System of Units
    standard will be added (kilo, mega, etc.) [default: False].
ascii  : bool, optional
    If not set, use unicode (smooth blocks) to fill the meter
    [default: False]. The fallback is to use ASCII characters (1-9 #).

Returns
-------
out  : Formatted meter and stats, ready to display.
3.099249
3.082521
1.005427
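A hedged call sketch for format_meter above (tqdm-style internals; format_interval, format_sizeof and _unich are assumed to be the module's own helpers, so this only works inside that module):

    # 37 of 100 iterations, 4.2 s elapsed, 60-column ASCII meter
    line = format_meter(37, 100, 4.2, ncols=60, prefix='demo: ', ascii=True)
    print(line)
    # e.g. "demo: 37%|#####...     | 37/100 [00:04<00:07,  8.81 it/s]"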
import cStringIO
stream = cStringIO.StringIO(data)
image = wx.ImageFromStream(stream)
icon = wx.EmptyIcon()
icon.CopyFromBitmap(wx.BitmapFromImage(image))
return icon
def getIcon( data )
Return the data from the resource as a wxIcon
2.511637
2.293953
1.094895
logging.basicConfig(level=logging.INFO)
app = RunSnakeRunApp(0)
app.MainLoop()
def main()
Mainloop for the application
8.660938
7.577925
1.142917
self.CreateMenuBar()
self.SetupToolBar()
self.CreateStatusBar()
self.leftSplitter = wx.SplitterWindow(self)
self.rightSplitter = wx.SplitterWindow(self.leftSplitter)
self.listControl = listviews.DataView(
    self.leftSplitter,
    columns=PROFILE_VIEW_COLUMNS,
    name='mainlist',
)
self.squareMap = squaremap.SquareMap(
    self.rightSplitter,
    padding=6,
    labels=True,
    adapter=self.adapter,
    square_style=True,
)
self.tabs = wx.Notebook(self.rightSplitter)
self.CreateSourceWindow(self.tabs)
self.calleeListControl = listviews.DataView(
    self.tabs, columns=PROFILE_VIEW_COLUMNS, name='callee',
)
self.allCalleeListControl = listviews.DataView(
    self.tabs, columns=PROFILE_VIEW_COLUMNS, name='allcallee',
)
self.allCallerListControl = listviews.DataView(
    self.tabs, columns=PROFILE_VIEW_COLUMNS, name='allcaller',
)
self.callerListControl = listviews.DataView(
    self.tabs, columns=PROFILE_VIEW_COLUMNS, name='caller',
)
self.ProfileListControls = [
    self.listControl,
    self.calleeListControl,
    self.allCalleeListControl,
    self.callerListControl,
    self.allCallerListControl,
]
self.tabs.AddPage(self.calleeListControl, _('Callees'), True)
self.tabs.AddPage(self.allCalleeListControl, _('All Callees'), False)
self.tabs.AddPage(self.callerListControl, _('Callers'), False)
self.tabs.AddPage(self.allCallerListControl, _('All Callers'), False)
if editor:
    self.tabs.AddPage(self.sourceCodeControl, _('Source Code'), False)
self.rightSplitter.SetSashSize(10)
# calculate size as proportional value for initial display...
self.LoadState(config_parser)
width, height = self.GetSizeTuple()
rightsplit = 2 * (height // 3)
leftsplit = width // 3
self.rightSplitter.SplitHorizontally(self.squareMap, self.tabs, rightsplit)
self.leftSplitter.SplitVertically(self.listControl, self.rightSplitter,
                                  leftsplit)
squaremap.EVT_SQUARE_HIGHLIGHTED(self.squareMap,
                                 self.OnSquareHighlightedMap)
squaremap.EVT_SQUARE_SELECTED(self.listControl, self.OnSquareSelectedList)
squaremap.EVT_SQUARE_SELECTED(self.squareMap, self.OnSquareSelectedMap)
squaremap.EVT_SQUARE_ACTIVATED(self.squareMap, self.OnNodeActivated)
for control in self.ProfileListControls:
    squaremap.EVT_SQUARE_ACTIVATED(control, self.OnNodeActivated)
    squaremap.EVT_SQUARE_HIGHLIGHTED(control, self.OnSquareHighlightedList)
self.moreSquareViewItem.Check(self.squareMap.square_style)
def CreateControls(self, config_parser)
Create our sub-controls
2.604001
2.602023
1.00076
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'),
            _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
#self.packageMenuItem = menu.AppendCheckItem(
#    ID_PACKAGE_VIEW, _('&File View'),
#    _('View time spent by package/module')
#)
self.percentageMenuItem = menu.AppendCheckItem(
    ID_PERCENTAGE_VIEW, _('&Percentage View'),
    _('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
    ID_ROOT_VIEW, _('&Root View (Home)'),
    _('View the root of the tree')
)
self.backViewItem = menu.Append(
    ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
    ID_UP_VIEW, _('&Up'),
    _('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
    ID_MORE_SQUARE, _('&Hierarchic Squares'),
    _('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
# it's more about how to generate graphics to describe profiling...
self.deeperViewItem = menu.Append(
    ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views')
)
self.shallowerViewItem = menu.Append(
    ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views')
)
#wx.ToolTip.Enable(True)
menubar.Append(menu, _('&View'))
self.viewTypeMenu = wx.Menu()
menubar.Append(self.viewTypeMenu, _('View &Type'))
self.SetMenuBar(menubar)

wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True))
wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile)
wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory)
wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView)
wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView)
wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView)
wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView)
wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView)
wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView)
wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle)
def CreateMenuBar(self)
Create our menu-bar for triggering operations
2.782673
2.770555
1.004374
if editor and self.sourceCodeControl is None:
    self.sourceCodeControl = wx.py.editwindow.EditWindow(self.tabs, -1)
    self.sourceCodeControl.SetText(u"")
    self.sourceFileShown = None
    self.sourceCodeControl.setDisplayLineNumbers(True)
def CreateSourceWindow(self, tabs)
Create our source-view window for tabs
7.881212
7.386241
1.067013
tb = self.CreateToolBar(self.TBFLAGS)
tsize = (24, 24)
tb.ToolBitmapSize = tsize
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize)
tb.AddLabelTool(ID_OPEN, "Open", open_bmp, shortHelp="Open",
                longHelp="Open a (c)Profile trace file")
if not osx:
    tb.AddSeparator()
#self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)
self.rootViewTool = tb.AddLabelTool(
    ID_ROOT_VIEW, _("Root View"),
    wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),
    shortHelp=_("Display the root of the current view tree (home view)")
)
self.rootViewTool = tb.AddLabelTool(
    ID_BACK_VIEW, _("Back"),
    wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),
    shortHelp=_("Back to the previously activated node in the call tree")
)
self.upViewTool = tb.AddLabelTool(
    ID_UP_VIEW, _("Up"),
    wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),
    shortHelp=_("Go one level up the call tree (highest-percentage parent)")
)
if not osx:
    tb.AddSeparator()
# TODO: figure out why the control is sizing the label incorrectly on Linux
self.percentageViewTool = wx.CheckBox(tb, -1, _("Percent "))
self.percentageViewTool.SetToolTip(wx.ToolTip(
    _("Toggle display of percentages in list views")))
tb.AddControl(self.percentageViewTool)
wx.EVT_CHECKBOX(self.percentageViewTool, self.percentageViewTool.GetId(),
                self.OnPercentageView)

self.viewTypeTool = wx.Choice(tb, -1,
                              choices=getattr(self.loader, 'ROOTS', []))
self.viewTypeTool.SetToolTip(wx.ToolTip(
    _("Switch between different hierarchic views of the data")))
wx.EVT_CHOICE(self.viewTypeTool, self.viewTypeTool.GetId(),
              self.OnViewTypeTool)
tb.AddControl(self.viewTypeTool)
tb.Realize()
def SetupToolBar(self)
Create the toolbar for common actions
3.089846
3.075563
1.004644
new = self.viewTypeTool.GetStringSelection()
if new != self.viewType:
    self.viewType = new
    self.OnRootView(event)
def OnViewTypeTool( self, event )
When the user changes the selection, make that our selection
5.700421
4.450506
1.280848
self.viewTypeTool.SetItems(getattr(self.loader, 'ROOTS', []))
if self.loader and self.viewType in self.loader.ROOTS:
    self.viewTypeTool.SetSelection(self.loader.ROOTS.index(self.viewType))

# configure the menu with the available choices...
def chooser(typ):
    def Callback(event):
        if typ != self.viewType:
            self.viewType = typ
            self.OnRootView(event)
    return Callback

# Clear all previous items
for item in self.viewTypeMenu.GetMenuItems():
    self.viewTypeMenu.DeleteItem(item)
if self.loader and self.loader.ROOTS:
    for root in self.loader.ROOTS:
        item = wx.MenuItem(
            self.viewTypeMenu, -1, root.title(),
            _("View hierarchy by %(name)s") % {'name': root.title()},
            kind=wx.ITEM_RADIO,
        )
        item.SetCheckable(True)
        self.viewTypeMenu.AppendItem(item)
        item.Check(root == self.viewType)
        wx.EVT_MENU(self, item.GetId(), chooser(root))
def ConfigureViewTypeChoices( self, event=None )
Configure the set of View types in the toolbar (and menus)
3.357425
3.258338
1.03041
dialog = wx.FileDialog(self, style=wx.OPEN | wx.FD_MULTIPLE)
if dialog.ShowModal() == wx.ID_OK:
    paths = dialog.GetPaths()
    if self.loader:
        # we've already got a displayed data-set, open new window...
        frame = MainFrame()
        frame.Show(True)
        frame.load(*paths)
    else:
        self.load(*paths)
def OnOpenFile(self, event)
Request to open a new profile file
4.283285
4.312622
0.993197
dialog = wx.FileDialog(self, style=wx.OPEN)
if dialog.ShowModal() == wx.ID_OK:
    path = dialog.GetPath()
    if self.loader:
        # we've already got a displayed data-set, open new window...
        frame = MainFrame()
        frame.Show(True)
        frame.load_memory(path)
    else:
        self.load_memory(path)
def OnOpenMemory(self, event)
Request to open a new memory-dump file
4.416763
4.328826
1.020314
self.directoryView = directoryView  # was toggled regardless of the argument
self.packageMenuItem.Check(self.directoryView)
self.packageViewTool.SetValue(self.directoryView)
if self.loader:
    self.SetModel(self.loader)
self.RecordHistory()
def SetPackageView(self, directoryView)
Set whether to use directory/package based view
5.685756
5.460964
1.041163
self.percentageView = percentageView
self.percentageMenuItem.Check(self.percentageView)
self.percentageViewTool.SetValue(self.percentageView)
total = self.adapter.value(self.loader.get_root(self.viewType))
for control in self.ProfileListControls:
    control.SetPercentage(self.percentageView, total)
self.adapter.SetPercentage(self.percentageView, total)
def SetPercentageView(self, percentageView)
Set whether to display percentage or absolute values
6.357919
6.301988
1.008875
node = self.activated_node
parents = []
selected_parent = None
if node:
    if hasattr(self.adapter, 'best_parent'):
        selected_parent = self.adapter.best_parent(node)
    else:
        parents = self.adapter.parents(node)
    if parents:
        if not selected_parent:
            parents.sort(key=lambda a: self.adapter.value(node, a))
            selected_parent = parents[-1]
        class event:
            node = selected_parent
        self.OnNodeActivated(event)
    else:
        self.SetStatusText(
            _('No parents for the currently selected node: %(node_name)s')
            % dict(node_name=self.adapter.label(node)))
else:
    self.SetStatusText(_('No currently selected node'))
def OnUpView(self, event)
Request to move up the hierarchy to highest-weight parent
3.997908
3.84682
1.039276
self.historyIndex -= 1
try:
    self.RestoreHistory(self.history[self.historyIndex])
except IndexError as err:
    self.SetStatusText(_('No further history available'))
def OnBackView(self, event)
Request to move backward in the history
4.996084
4.120319
1.212548
self.adapter, tree, rows = self.RootNode()
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
self.ConfigureViewTypeChoices()
def OnRootView(self, event)
Reset view to the root of the tree
29.451385
25.93762
1.13547
self.activated_node = self.selected_node = event.node
self.squareMap.SetModel(event.node, self.adapter)
self.squareMap.SetSelected(event.node)
if editor:
    if self.SourceShowFile(event.node):
        if hasattr(event.node, 'lineno'):
            self.sourceCodeControl.GotoLine(event.node.lineno)
self.RecordHistory()
def OnNodeActivated(self, event)
Double-click or enter on a node in some control...
10.10416
9.651823
1.046865
filename = self.adapter.filename(node)
if filename and self.sourceFileShown != filename:
    try:
        data = open(filename).read()
    except Exception as err:
        # TODO: load from zips/eggs? What about .pyc issues?
        return None
    else:
        #self.sourceCodeControl.setText(data)
        self.sourceCodeControl.ClearAll()
        self.sourceCodeControl.AppendText(data)
return filename
def SourceShowFile(self, node)
Show the given file in the source-code view (attempt it anyway)
8.047971
7.734732
1.040498
self.selected_node = event.node
self.calleeListControl.integrateRecords(self.adapter.children(event.node))
self.callerListControl.integrateRecords(self.adapter.parents(event.node))
def OnSquareSelected(self, event)
Update all views to show selection children/parents
10.672609
8.31685
1.283251
self.squareMap.square_style = not self.squareMap.square_style
self.squareMap.Refresh()
self.moreSquareViewItem.Check(self.squareMap.square_style)
def OnMoreSquareToggle( self, event )
Toggle the more-square view (better looking, but more likely to filter records)
6.790854
4.442052
1.528765
if not self.restoringHistory:
    record = self.activated_node
    if self.historyIndex < -1:
        try:
            del self.history[self.historyIndex + 1:]
        except AttributeError as err:
            pass
    if (not self.history) or record != self.history[-1]:
        self.history.append(record)
        del self.history[:-200]
        self.historyIndex = -1
def RecordHistory(self)
Add the given node to the history-set
4.966316
4.740478
1.04764
if len(filenames) == 1:
    if os.path.basename(filenames[0]) == 'index.coldshot':
        return self.load_coldshot(os.path.dirname(filenames[0]))
    elif os.path.isdir(filenames[0]):
        return self.load_coldshot(filenames[0])
try:
    self.loader = pstatsloader.PStatsLoader(*filenames)
    self.ConfigureViewTypeChoices()
    self.SetModel(self.loader)
    self.viewType = self.loader.ROOTS[0]
    self.SetTitle(_("Run Snake Run: %(filenames)s")
                  % {'filenames': ', '.join(filenames)[:120]})
except (IOError, OSError, ValueError, MemoryError) as err:
    self.SetStatusText(
        _('Failure during load of %(filenames)s: %(err)s')
        % dict(filenames=" ".join([repr(x) for x in filenames]), err=err))
def load(self, *filenames)
Load our dataset (iteratively)
5.232174
5.248259
0.996935
self.loader = loader
self.adapter, tree, rows = self.RootNode()
self.listControl.integrateRecords(rows.values())
self.activated_node = tree
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
def SetModel(self, loader)
Set our overall model (a loader object) and populate sub-controls
22.483641
22.448759
1.001554
tree = self.loader.get_root(self.viewType)
adapter = self.loader.get_adapter(self.viewType)
rows = self.loader.get_rows(self.viewType)
adapter.SetPercentage(self.percentageView, adapter.value(tree))
return adapter, tree, rows
def RootNode(self)
Return our current root node and appropriate adapter for it
8.574542
7.368335
1.163701
if not config_parser.has_section('window'):
    config_parser.add_section('window')
if self.IsMaximized():
    config_parser.set('window', 'maximized', str(True))
else:
    config_parser.set('window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set('window', 'width', str(size[0]))
config_parser.set('window', 'height', str(size[1]))
config_parser.set('window', 'x', str(position[0]))
config_parser.set('window', 'y', str(position[1]))
for control in self.ProfileListControls:
    control.SaveState(config_parser)
return config_parser
def SaveState( self, config_parser )
Save window state so it can be restored on the next run...
1.816602
1.683735
1.078912
if not config_parser:
    return
if (
    not config_parser.has_section('window')
    or (
        config_parser.has_option('window', 'maximized')
        and config_parser.getboolean('window', 'maximized')
    )
):
    self.Maximize(True)
try:
    width, height, x, y = [
        config_parser.getint('window', key)
        for key in ['width', 'height', 'x', 'y']]
    self.SetPosition((x, y))
    self.SetSize((width, height))
except ConfigParser.NoSectionError, err:
    # the file isn't written yet, so don't even warn...
    pass
except Exception, err:
    # this is just convenience, if it breaks in *any* way, ignore it...
    log.error(
        "Unable to load window preferences, ignoring: %s",
        traceback.format_exc())
try:
    font_size = config_parser.getint('window', 'font_size')
except Exception:
    pass  # use the default, by default
else:
    font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
    font.SetPointSize(font_size)
    for ctrl in self.ProfileListControls:
        ctrl.SetFont(font)
for control in self.ProfileListControls:
    control.LoadState(config_parser)
self.config = config_parser
wx.EVT_CLOSE(self, self.OnCloseWindow)
def LoadState( self, config_parser )
Set our window state from the given config_parser instance
3.200789
3.118769
1.026299
wx.Image.AddHandler(self.handler)
frame = MainFrame(config_parser=load_config())
frame.Show(True)
self.SetTopWindow(frame)
if profile:
    wx.CallAfter(frame.load, *[profile])
elif sys.argv[1:]:
    if sys.argv[1] == '-m':
        if sys.argv[2:]:
            wx.CallAfter(frame.load_memory, sys.argv[2])
        else:
            log.warn('No memory file specified')
    else:
        wx.CallAfter(frame.load, *sys.argv[1:])
return True
def OnInit(self, profile=None, memoryProfile=None)
Initialise the application
4.056163
4.088434
0.992107
wx.Image.AddHandler(self.handler)
frame = MainFrame(config_parser=load_config())
frame.Show(True)
self.SetTopWindow(frame)
if sys.argv[1:]:
    wx.CallAfter(frame.load_memory, sys.argv[1])
else:
    log.warn('No memory file specified')
return True
def OnInit(self)
Initialise the application
5.414983
5.349722
1.012199
replace, insert, delete = "r", "i", "d"

L1, L2 = len(seq1), len(seq2)
if L1 < L2:
    L1, L2 = L2, L1
    seq1, seq2 = seq2, seq1

ldiff = L1 - L2
if ldiff == 0:
    models = (insert+delete, delete+insert, replace+replace)
elif ldiff == 1:
    models = (delete+replace, replace+delete)
elif ldiff == 2:
    models = (delete+delete,)
else:
    return -1

res = 3
for model in models:
    i = j = c = 0
    while (i < L1) and (j < L2):
        if seq1[i] != seq2[j]:
            c = c+1
            if 2 < c:
                break
            if transpositions and ldiff != 2 \
               and i < L1 - 1 and j < L2 - 1 \
               and seq1[i+1] == seq2[j] and seq1[i] == seq2[j+1]:
                i, j = i+2, j+2
            else:
                cmd = model[c-1]
                if cmd == delete:
                    i = i+1
                elif cmd == insert:
                    j = j+1
                else:
                    assert cmd == replace
                    i, j = i+1, j+1
        else:
            i, j = i+1, j+1
    if 2 < c:
        continue
    elif i < L1:
        if L1-i <= model[c:].count(delete):
            c = c + (L1-i)
        else:
            continue
    elif j < L2:
        if L2-j <= model[c:].count(insert):
            c = c + (L2-j)
        else:
            continue
    if c < res:
        res = c
if res == 3:
    res = -1
return res
def fast_comp(seq1, seq2, transpositions=False)
Compute the distance between the two sequences `seq1` and `seq2` up to a maximum of 2 included, and return it. If the edit distance between the two sequences is higher than that, -1 is returned.

If `transpositions` is `True`, transpositions will be taken into account for the computation of the distance. This can make a difference, e.g.:

>>> fast_comp("abc", "bac", transpositions=False)
2
>>> fast_comp("abc", "bac", transpositions=True)
1

This is faster than `levenshtein` by an order of magnitude, but on the other hand is of limited use.

The algorithm comes from `http://writingarchives.sakura.ne.jp/fastcomp`. I've added transpositions support to the original code.
2.178198
2.184932
0.996918
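Because fast_comp caps the distance at 2, it works well as a cheap pre-filter before any full edit-distance computation. A minimal usage sketch; the word list and query are made up for illustration:

# keep only candidates within edit distance 2 of the query; fast_comp returns -1 beyond that
words = ["abc", "bac", "abcd", "xyz"]
query = "abc"
close = [w for w in words if fast_comp(query, w, transpositions=True) != -1]
# close == ['abc', 'bac', 'abcd']; 'xyz' is rejected (distance 3 > 2)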
'''Checks if a path is an actual file that exists'''
if not os.path.isfile(dirname):
    msg = "{0} is not an existing file".format(dirname)
    raise argparse.ArgumentTypeError(msg)
else:
    return dirname
def is_file(dirname)
Checks if a path is an actual file that exists
3.219048
2.771497
1.161483
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
    msg = "{0} is not a directory".format(dirname)
    raise argparse.ArgumentTypeError(msg)
else:
    return dirname
def is_dir(dirname)
Checks if a path is an actual directory that exists
2.823757
2.505523
1.127013
'''Checks if a path is an actual directory that exists or a file'''
if not os.path.isdir(dirname) and not os.path.isfile(dirname):
    msg = "{0} is not a directory nor a file".format(dirname)
    raise argparse.ArgumentTypeError(msg)
else:
    return dirname
def is_dir_or_file(dirname)
Checks if a path is an actual directory that exists or a file
2.978624
2.298029
1.296165
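These three validators are designed to be passed as argparse `type=` callbacks, so bad paths are rejected at parse time with a proper usage error. A minimal sketch under that assumption (the parser and flag names are hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=is_dir_or_file, help='input file or folder')
parser.add_argument('-o', '--output', type=is_dir, help='output folder')
args = parser.parse_args(['-i', '.', '-o', '.'])  # a bad path raises ArgumentTypeError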
'''Relative path to absolute'''
if (type(relpath) is object or type(relpath) is file):
    relpath = relpath.name
return os.path.abspath(os.path.expanduser(relpath))
def fullpath(relpath)
Relative path to absolute
4.165759
4.44557
0.937059
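A small illustration: expanduser plus abspath resolves `~` and collapses `..` segments, so the exact output depends on the current user and working directory (the paths below are placeholders):

fullpath('~/notes/../todo.txt')  # -> e.g. '/home/user/todo.txt'
fullpath('data/file.bin')        # -> absolute path rooted at the current working directory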
'''Recursively walk through a folder. This provides a means to flatten out the file listing (necessary to show a progress bar). This is a generator.'''
# If it's only a single file, return this single file
if os.path.isfile(inputpath):
    abs_path = fullpath(inputpath)
    yield os.path.dirname(abs_path), os.path.basename(abs_path)
# Else if it's a folder, walk recursively and return every file
else:
    for dirpath, dirs, files in walk(inputpath):
        if sorting:
            files.sort()
            dirs.sort()  # sort directories in-place for ordered recursive walking
        for filename in files:
            yield (dirpath, filename)
def recwalk(inputpath, sorting=True)
Recursively walk through a folder. This provides a means to flatten out the file listing (necessary to show a progress bar). This is a generator.
6.115767
3.541581
1.726847
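Because recwalk yields one (dirpath, filename) pair per file, a caller can both count files ahead of time and stream them, which is what makes a progress bar feasible. A minimal usage sketch summing file sizes; 'some_folder' is a placeholder path:

import os

total = 0
for dirpath, filename in recwalk('some_folder'):
    total += os.path.getsize(os.path.join(dirpath, filename))
print(total)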
'''From a path given in any format, converts to posix path format.
fromwinpath=True forces the input path to be recognized as a Windows path (useful on Unix machines to unit-test Windows paths)'''
if fromwinpath:
    pathparts = list(PureWindowsPath(path).parts)
else:
    pathparts = list(PurePath(path).parts)
if nojoin:
    return pathparts
else:
    return posixpath.join(*pathparts)
def path2unix(path, nojoin=False, fromwinpath=False)
From a path given in any format, converts to posix path format. fromwinpath=True forces the input path to be recognized as a Windows path (useful on Unix machines to unit-test Windows paths)
5.26053
1.891411
2.781273
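For example, on any platform (relative paths shown for clarity):

path2unix('a\\b\\c.txt', fromwinpath=True)               # -> 'a/b/c.txt'
path2unix('a\\b\\c.txt', nojoin=True, fromwinpath=True)  # -> ['a', 'b', 'c.txt']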
if os.path.exists(path):
    if os.path.isdir(path):
        shutil.rmtree(path)
        return True
    elif os.path.isfile(path):
        os.remove(path)
        return True
return False
def remove_if_exist(path)
Delete a file or a directory recursively if it exists; if it does not, no exception is raised
2.479503
2.056221
1.205854
if not only_missing:
    remove_if_exist(dst)
if os.path.exists(src):
    if os.path.isdir(src):
        if not only_missing:
            shutil.copytree(src, dst, symlinks=False, ignore=None)
        else:
            for dirpath, filepath in recwalk(src):
                srcfile = os.path.join(dirpath, filepath)
                relpath = os.path.relpath(srcfile, src)
                dstfile = os.path.join(dst, relpath)
                if not os.path.exists(dstfile):
                    create_dir_if_not_exist(os.path.dirname(dstfile))
                    shutil.copyfile(srcfile, dstfile)
                    shutil.copystat(srcfile, dstfile)
        return True
    elif os.path.isfile(src) and (not only_missing or not os.path.exists(dst)):
        shutil.copyfile(src, dst)
        shutil.copystat(src, dst)
        return True
return False
def copy_any(src, dst, only_missing=False)
Copy a file or a directory tree, deleting the destination before processing
2.005626
1.90099
1.055043
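A usage sketch with placeholder paths: a full mirror on the first pass, then a cheap top-up of only the missing files on a later pass:

copy_any('src_dir', 'backup_dir')                     # full copy, destination wiped first
copy_any('src_dir', 'backup_dir', only_missing=True)  # later pass: copy only files not already there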
flord = OrderedDict(sorted(fileslist.items(), key=lambda x: x[1], reverse=True))
if multi <= 1:
    fgrouped = {}
    i = 0
    for x in flord.keys():
        i += 1
        fgrouped[i] = [[x]]
    return fgrouped
fgrouped = {}
i = 0
while flord:
    i += 1
    fgrouped[i] = []
    big_key, big_value = flord.popitem(0)
    fgrouped[i].append([big_key])
    for j in xrange(multi-1):
        cluster = []
        if not flord:
            break
        child_key, child_value = flord.popitem(0)
        cluster.append(child_key)
        if child_value == big_value:
            fgrouped[i].append(cluster)
            continue
        else:
            diff = big_value - child_value
            # iterate over a snapshot of the items so we can safely delete from flord
            for key, value in flord.items():
                if value <= diff:
                    cluster.append(key)
                    del flord[key]
                    if value == diff:
                        break
                    else:
                        child_value += value
                        diff = big_value - child_value
            fgrouped[i].append(cluster)
return fgrouped
def group_files_by_size(fileslist, multi)
Cluster files into the specified number of groups, where each group's total size is as close as possible to the others'. Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
    - Create a cluster X
    - A = Pop first item in F
    - Put A in X[0] (X[0] is thus the first group in cluster X)
    - For g in 1..len(G)-1:
        - B = Pop first item in F
        - Put B in X[g]
        - group_size := size(B)
        - If group_size != size(A):
            - While group_size < size(A):
                - Find next item C in F with size(C) <= size(A) - group_size
                - Put C in X[g]
                - group_size := group_size + size(C)
3.29549
1.837374
1.793587
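A worked example with made-up sizes and two groups per cluster: the biggest file seeds the first group, and the second group is filled until its total matches:

files = {'a': 500, 'b': 300, 'c': 200, 'd': 100}
group_files_by_size(files, 2)
# -> {1: [['a'], ['b', 'c']],   # 500 vs 300+200
#     2: [['d']]}               # leftover cluster with no partner files remaining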
ford = sorted(fileslist.iteritems(), key=lambda x: x[1], reverse=True)
ford = [[x[0]] for x in ford]
return [group for group in grouper(nbgroups, ford)]
def group_files_by_size_simple(fileslist, nbgroups)
Simple and fast file-grouping strategy: just order by size and group files n-by-n, so that files with the closest sizes are grouped together. In this strategy there is only one file per subgroup, and there will often be space left over since there is no filling strategy, but it is very fast.
null
null
null
fsizes = {}
total_files = 0
allitems = None
if isinstance(fgrouped, dict):
    allitems = fgrouped.iteritems()
elif isinstance(fgrouped, list):
    allitems = enumerate(fgrouped)
for fkey, cluster in allitems:
    fsizes[fkey] = []
    for subcluster in cluster:
        tot = 0
        if subcluster is not None:
            for fname in subcluster:
                tot += fileslist[fname]
                total_files += 1
        fsizes[fkey].append(tot)
return fsizes, total_files
def grouped_count_sizes(fileslist, fgrouped)
Compute the total size per group and total number of files. Useful to check that everything is OK.
2.829944
2.585479
1.094553
values = [c.GetValue()
          for c in chain(*self.widgets)
          if c.GetValue() is not None]
return ' '.join(values)
def GetOptions(self)
returns the collective values from all of the widgets contained in the panel
8.69089
7.242584
1.199971
"itertools recipe: Collect data into fixed-length chunks or blocks" # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return izip_longest(fillvalue=fillvalue, *args)
def chunk(self, iterable, n, fillvalue=None)
itertools recipe: Collect data into fixed-length chunks or blocks
3.10025
2.400752
1.291366
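Called on an instance (here `obj` stands in for whatever object owns the method), the recipe pads the last chunk with the fill value:

list(obj.chunk('ABCDEFG', 3, 'x'))
# -> [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]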
'''Positionals have no associated options_string, so only the supplied arguments are returned. The order is assumed to be the same as the order of declaration in the client code. Returns "argument_value"'''
self.AssertInitialization('Positional')
if str(self._widget.GetValue()) == EMPTY:
    return None
return self._widget.GetValue()
def GetValue(self)
Positionals have no associated options_string, so only the supplied arguments are returned. The order is assumed to be the same as the order of declaration in the client code. Returns "argument_value"
17.591526
3.305499
5.321898