code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
''' Returns "--option_name argument" ''' self.AssertInitialization('Choice') if self._widget.GetValue() == self._DEFAULT_VALUE: return None return ' '.join( [self._action.option_strings[0] if self._action.option_strings else '', # get the verbose copy if available self._widget.GetValue()])
def GetValue(self)
Returns "--option_name argument"
14.533859
9.664623
1.503821
''' General options are key/value style pairs (conceptually). Thus the name of the option, as well as the argument to it are returned e.g. >>> myscript --outfile myfile.txt returns "--Option Value" ''' self.AssertInitialization('Optional') value = self._widget.GetValue() if not value or len(value) <= 0: return None return ' '.join( [self._action.option_strings[0], # get the verbose copy if available value])
def GetValue(self)
General options are key/value style pairs (conceptually). Thus the name of the option, as well as the argument to it are returned e.g. >>> myscript --outfile myfile.txt returns "--Option Value"
22.897085
4.659902
4.913641
''' Flag options have no param associated with them. Thus we only need the name of the option. e.g >>> Python -v myscript returns Options name for argument (-v) ''' if not self._widget.GetValue() or len(self._widget.GetValue()) <= 0: return None else: return self._action.option_strings[0]
def GetValue(self)
Flag options have no param associated with them. Thus we only need the name of the option. e.g >>> Python -v myscript returns Options name for argument (-v)
16.285006
2.916289
5.584153
''' Custom wrapper calculator to account for the increased size of the _msg widget after being inlined with the wx.CheckBox ''' if self._msg is None: return help_msg = self._msg width, height = size content_area = int((width / 3) * .70) wiggle_room = range(int(content_area - content_area * .05), int(content_area + content_area * .05)) if help_msg.Size[0] not in wiggle_room: self._msg.SetLabel(self._msg.GetLabelText().replace('\n', ' ')) self._msg.Wrap(content_area)
def Update(self, size)
Custom wrapper calculator to account for the increased size of the _msg widget after being inlined with the wx.CheckBox
7.625965
3.849396
1.981081
''' NOTE: Added on plane. Cannot remember exact implementation of counter objects. I believe that they count sequential pairings of options e.g. -vvvvv But I'm not sure. That's what I'm going with for now. Returns str(action.options_string[0]) * DropDown Value ''' dropdown_value = self._widget.GetValue() if not str(dropdown_value).isdigit(): return None arg = str(self._action.option_strings[0]).replace('-', '') repeated_args = arg * int(dropdown_value) return '-' + repeated_args
def GetValue(self)
NOTE: Added on plane. Cannot remember exact implementation of counter objects. I believe that they count sequential pairings of options e.g. -vvvvv But I'm not sure. That's what I'm going with for now. Returns str(action.options_string[0]) * DropDown Value
20.438246
2.483563
8.229404
''' Returns the full path to the language file ''' filename = language.lower() + '.json' lang_file_path = os.path.join(_DEFAULT_DIR, filename) if not os.path.exists(lang_file_path): raise IOError('Could not find {} language file'.format(language)) return lang_file_path
def get_path(language)
Returns the full path to the language file
3.107879
3.01955
1.029252
''' Open and return the supplied json file ''' global _DICTIONARY try: json_file = filename + '.json' with open(os.path.join(_DEFAULT_DIR, json_file), 'rb') as f: _DICTIONARY = json.load(f) except IOError: raise IOError('Language file not found. Make sure that your translation file is in the languages directory.')
def load(filename)
Open and return the supplied json file
6.502768
5.897144
1.102698
try: s = repr(obj) if not clip or len(s) <= clip: return s else: return s[:clip-4]+'..'+s[-2:] except: return 'N/A'
def safe_repr(obj, clip=None)
Convert an object to its string representation, yielding the same result as `repr` but catching all exceptions and returning 'N/A' instead of raising them. Strings may be truncated by providing `clip`. >>> safe_repr(42) '42' >>> safe_repr('Clipped text', clip=8) 'Clip..xt' >>> safe_repr([1,2,3,4], clip=8) '[1,2..4]'
3.181073
3.390547
0.938218
s = str(obj) s = s.replace('\n', '|') if len(s) > max: if left: return '...'+s[len(s)-max+3:] else: return s[:(max-3)]+'...' else: return s
def trunc(obj, max, left=0)
Convert `obj` to a string, eliminate newlines and truncate the string to `max` characters. If the string is longer than `max`, ``...`` is added to mark the truncation. With `left=True`, the string is truncated at the beginning instead. @note: Does not catch exceptions when converting `obj` to a string with `str`. >>> trunc('This is a long text.', 8) This ... >>> trunc('This is a long text.', 8, left=True) ...text.
2.634687
3.30712
0.796671
degree = 0 pattern = "%4d %s" while i > base: pattern = "%7.2f %s" i = i / float(base) degree += 1 scales = ['B', 'KB', 'MB', 'GB', 'TB', 'EB'] return pattern % (i, scales[degree])
def pp(i, base=1024)
Pretty-print the integer `i` as a human-readable size representation.
4.483768
4.041377
1.109465
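For the pp record above, a brief usage sketch may help when reading its format patterns (assuming the pp function from this record is in scope; the expected strings are worked out by hand from the code, not taken from library output):
# Hypothetical usage sketch of pp(); expected outputs follow from the "%4d %s" / "%7.2f %s" patterns.
print(pp(512))          # stays at degree 0 -> roughly " 512 B"
print(pp(2048))         # 2048 / 1024 = 2.0  -> roughly "2.00 KB"
print(pp(123456789))    # /1024/1024 ~ 117.74 -> roughly "117.74 MB"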
if t is None: return '' h, m, s = int(t / 3600), int(t / 60 % 60), t % 60 return "%02d:%02d:%05.2f" % (h, m, s)
def pp_timestamp(t)
Get a friendly timestamp represented as a string.
2.063226
2.0084
1.027299
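A short hand-checked sketch of the pp_timestamp record above (assuming the function from this record is in scope; the expected strings follow from the "%02d:%02d:%05.2f" format):
# Hypothetical usage sketch of pp_timestamp().
print(pp_timestamp(3725.5))   # 1 h, 2 min, 5.5 s -> "01:02:05.50"
print(pp_timestamp(None))     # None is rendered as the empty string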
if not stream: # pragma: no cover stream = sys.stdout self.metadata.sort(key=lambda x: -x.size) stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation')) for g in self.metadata: stream.write('0x%08x %8d %-12s %-46s\n' % (g.id, g.size, trunc(g.type, 12), trunc(g.str, 46))) stream.write('Garbage: %8d collected objects (%s in cycles): %12s\n' % \ (self.count, self.num_in_cycles, pp(self.total_size)))
def print_stats(self, stream=None)
Log annotated garbage objects to console or file. :param stream: open file, uses sys.stdout if not given
4.630793
4.394371
1.053801
result = set(self.file_dict) # Ignore profiling code. __file__ does not always provide consistent # results with f_code.co_filename (ex: easy_install with zipped egg), # so inspect current frame instead. # XXX: assumes all of pprofile code resides in a single file. result.discard(inspect.currentframe().f_code.co_filename) return result
def getFilenameSet(self)
Returns a set of profiled file names. Note: "file name" is used loosely here. See python documentation for co_filename, linecache module and PEP302. It may not be a valid filesystem path.
14.280603
11.004395
1.297718
print >> out, 'version: 1' if commandline is not None: print >> out, 'cmd:', commandline print >> out, 'creator: pprofile' print >> out, 'event: usphit :us/hit' print >> out, 'events: hits us usphit' file_dict = self.file_dict if relative_path: convertPath = _relpath else: convertPath = lambda x: x for name in self._getFileNameList(filename): printable_name = convertPath(name) print >> out, 'fl=%s' % printable_name funcname = False call_list_by_line = file_dict[name].getCallListByLine() for lineno, func, firstlineno, hits, duration, _ in self._iterFile( name, call_list_by_line): call_list = call_list_by_line.get(lineno, ()) if not hits and not call_list: continue if func is None: func, firstlineno = call_list[0][:2] if funcname != func: funcname = func print >> out, 'fn=%s' % _getFuncOrFile(func, printable_name, firstlineno) ticks = int(duration * 1000000) if hits == 0: ticksperhit = 0 else: ticksperhit = ticks / hits print >> out, lineno, hits, ticks, int(ticksperhit) for _, _, hits, duration, callee_file, callee_line, \ callee_name in sorted(call_list, key=lambda x: x[2:4]): callee_file = convertPath(callee_file) print >> out, 'cfl=%s' % callee_file print >> out, 'cfn=%s' % _getFuncOrFile(callee_name, callee_file, callee_line) print >> out, 'calls=%s' % hits, callee_line duration *= 1000000 print >> out, lineno, hits, int(duration), int(duration / hits)
def callgrind(self, out, filename=None, commandline=None, relative_path=False)
Dump statistics in callgrind format. Contains: - per-line hit count, time and time-per-hit - call associations (call tree) Note: hit count is not inclusive, in that it is not the sum of all hits inside that call. Time unit: microsecond (1e-6 second). out (file-ish opened for writing) Destination of callgrind profiling data. filename (str, list of str) If provided, dump stats for given source file(s) only. By default, list for all known files. commandline (anything with __str__) If provided, will be output as the command line used to generate this profiling data. relative_path (bool) When True, absolute elements are stripped from path. Useful when maintaining several copies of source trees with their own profiling result, so kcachegrind does not look in system-wide files which may not match with profiled code.
3.838192
3.644176
1.05324
file_dict = self.file_dict total_time = self.total_time if commandline is not None: print >> out, 'Command line:', commandline print >> out, 'Total duration: %gs' % total_time if not total_time: return def percent(value, scale): if scale == 0: return 0 return value * 100 / float(scale) for name in self._getFileNameList(filename): file_timing = file_dict[name] file_total_time = file_timing.getTotalTime() call_list_by_line = file_timing.getCallListByLine() print >> out, 'File:', name print >> out, 'File duration: %gs (%.2f%%)' % (file_total_time, percent(file_total_time, total_time)) print >> out, _ANNOTATE_HEADER print >> out, _ANNOTATE_HORIZONTAL_LINE for lineno, _, _, hits, duration, line in self._iterFile(name, call_list_by_line): if hits: time_per_hit = duration / hits else: time_per_hit = 0 print >> out, _ANNOTATE_FORMAT % { 'lineno': lineno, 'hits': hits, 'time': duration, 'time_per_hit': time_per_hit, 'percent': percent(duration, total_time), 'line': line, }, for _, _, hits, duration, callee_file, callee_line, \ callee_name in call_list_by_line.get(lineno, ()): print >> out, _ANNOTATE_CALL_FORMAT % { 'hits': hits, 'time': duration, 'time_per_hit': duration / hits, 'percent': percent(duration, total_time), 'callee_file': callee_file, 'callee_line': callee_line, 'callee_name': callee_name, }
def annotate(self, out, filename=None, commandline=None, relative_path=False)
Dump annotated source code with current profiling statistics to "out" file. Time unit: second. out (file-ish opened for writing) Destination of annotated sources. filename (str, list of str) If provided, dump stats for given source file(s) only. By default, list for all known files. commandline (anything with __str__) If provided, will be output as the command line used to generate this annotation. relative_path (bool) For compatibility with callgrind. Ignored.
2.517225
2.467353
1.020213
if self.enabled_start: sys.settrace(None) self._disable() else: warn('Duplicate "disable" call')
def disable(self, threads=True)
Disable profiling.
14.797567
11.98873
1.23429
import __main__ dict = __main__.__dict__ return self.runctx(cmd, dict, dict)
def run(self, cmd)
Similar to profile.Profile.run .
7.293167
5.836564
1.249565
if not self.nostdout: self.stdout.write(data+end) if self.file is not None: self.file.write(data+end) if flush: self.flush()
def write(self, data, end="\n", flush=True)
Output data to stdout and/or file
3.180451
2.884949
1.102429
if not self.nostdout: self.stdout.flush() if self.file is not None: self.file.flush()
def flush(self)
Force commit changes to the file and stdout
4.553813
3.310708
1.37548
if normalized: return nlevenshtein(seq1, seq2, method=1) if seq1 == seq2: return 0 len1, len2 = len(seq1), len(seq2) if max_dist >= 0 and abs(len1 - len2) > max_dist: return -1 if len1 == 0: return len2 if len2 == 0: return len1 if len1 < len2: len1, len2 = len2, len1 seq1, seq2 = seq2, seq1 column = array('L', range(len2 + 1)) for x in range(1, len1 + 1): column[0] = x last = x - 1 for y in range(1, len2 + 1): old = column[y] cost = int(seq1[x - 1] != seq2[y - 1]) column[y] = min(column[y] + 1, column[y - 1] + 1, last + cost) last = old if max_dist >= 0 and min(column) > max_dist: return -1 if max_dist >= 0 and column[len2] > max_dist: # stay consistent, even if we have the exact distance return -1 return column[len2]
def levenshtein(seq1, seq2, normalized=False, max_dist=-1)
Compute the absolute Levenshtein distance between the two sequences `seq1` and `seq2`. The Levenshtein distance is the minimum number of edit operations necessary for transforming one sequence into the other. The edit operations allowed are: * deletion: ABC -> BC, AC, AB * insertion: ABC -> ABCD, EABC, AEBC.. * substitution: ABC -> ABE, ADC, FBC.. The `max_dist` parameter controls at which moment we should stop computing the distance between the provided sequences. If it is a negative integer, the distance will be computed until the sequences are exhausted; otherwise, the computation will stop at the moment the calculated distance is higher than `max_dist`, and then return -1. For example: >>> levenshtein("abc", "abcd", max_dist=1) # dist = 1 1 >>> levenshtein("abc", "abcde", max_dist=1) # dist = 2 -1 This can be a time saver if you're not interested in the exact distance, but only need to check if the distance between the given sequences is below a given threshold. The `normalized` parameter is here for backward compatibility; providing it will result in a call to `nlevenshtein`, which should be used directly instead.
2.044313
2.034002
1.00507
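Because the levenshtein record above documents both the plain distance and the max_dist early exit, here is a small hand-checked sketch (assuming the function from this record is importable; the expected values follow from the definition of edit distance):
# Hypothetical usage sketch of levenshtein().
assert levenshtein("kitten", "sitting") == 3               # substitute k->s, e->i, insert g
assert levenshtein("kitten", "sitting", max_dist=2) == -1  # distance 3 exceeds max_dist, so -1 is returned
assert levenshtein("abc", "abc") == 0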
if seq1 == seq2: return 0.0 len1, len2 = len(seq1), len(seq2) if len1 == 0 or len2 == 0: return 1.0 if len1 < len2: # minimize the arrays size len1, len2 = len2, len1 seq1, seq2 = seq2, seq1 if method == 1: return levenshtein(seq1, seq2) / float(len1) if method != 2: raise ValueError("expected either 1 or 2 for `method` parameter") column = array('L', range(len2 + 1)) length = array('L', range(len2 + 1)) for x in range(1, len1 + 1): column[0] = length[0] = x last = llast = x - 1 for y in range(1, len2 + 1): # dist old = column[y] ic = column[y - 1] + 1 dc = column[y] + 1 rc = last + (seq1[x - 1] != seq2[y - 1]) column[y] = min(ic, dc, rc) last = old # length lold = length[y] lic = length[y - 1] + 1 if ic == column[y] else 0 ldc = length[y] + 1 if dc == column[y] else 0 lrc = llast + 1 if rc == column[y] else 0 length[y] = max(ldc, lic, lrc) llast = lold return column[y] / float(length[y])
def nlevenshtein(seq1, seq2, method=1)
Compute the normalized Levenshtein distance between `seq1` and `seq2`. Two normalization methods are provided. For both of them, the normalized distance will be a float between 0 and 1, where 0 means equal and 1 completely different. The computation obeys the following patterns: 0.0 if seq1 == seq2 1.0 if len(seq1) == 0 or len(seq2) == 0 edit distance / factor otherwise The `method` parameter specifies which normalization factor should be used. It can have the value 1 or 2, which correspond to the following: 1: the length of the shortest alignment between the sequences (that is, the length of the longest sequence) 2: the length of the longest alignment between the sequences Which normalization factor should be chosen is a matter of taste. The first one is cheap to compute. The second one is more costly, but it accounts better than the first one for parallelisms of symbols between the sequences. For the rationale behind the use of the second method, see: Heeringa, "Measuring Dialect Pronunciation Differences using Levenshtein Distance", 2004, p. 130 sq, which is available online at: http://www.let.rug.nl/~heeringa/dialectology/thesis/thesis.pdf
2.461288
2.372701
1.037336
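A matching sketch for the nlevenshtein record above, showing the method=1 normalization by the longer sequence length (assuming the functions from these records are in scope; values are worked out by hand):
# Hypothetical usage sketch of nlevenshtein().
d = nlevenshtein("abc", "acd", method=1)   # levenshtein("abc", "acd") == 2, longest length == 3
assert abs(d - 2 / 3.0) < 1e-9
assert nlevenshtein("abc", "abc") == 0.0   # equal sequences
assert nlevenshtein("", "abc") == 1.0      # one empty sequence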
return [ parent for parent in getattr( node, 'parents', [] ) if getattr(parent, 'tree', self.TREE) == self.TREE ]
def parents(self, node)
Determine all parents of node in our tree
7.428721
6.967319
1.066224
if not node.directory: # TODO: any cases other than built-ins? return None if node.filename == '~': # TODO: look up C/Cython/whatever source??? return None return os.path.join(node.directory, node.filename)
def filename( self, node )
Extension to squaremap api to provide "what file is this" information
9.9554
9.540918
1.043443
oid = id(obj) try: server.id2ref[oid] = obj except TypeError: server.id2obj[oid] = obj return str(oid)
def get_ref(obj)
Get a string reference to an object. Stores a weak reference in a dictionary using the object's id as the key. If the object cannot be weakly referenced (e.g. dictionaries, frame objects), a strong reference is stored in a regular dictionary instead. Returns the object's id as a string.
6.012498
5.581436
1.077232
oid = int(ref) return server.id2ref.get(oid) or server.id2obj[oid]
def get_obj(ref)
Get object from string reference.
10.113517
10.252493
0.986445
pmi = ProcessMemoryInfo() threads = get_current_threads() return dict(info=pmi, threads=threads)
def process()
Get process overview.
9.725007
9.354098
1.039652
stats = server.stats if stats and stats.snapshots: stats.annotate() timeseries = [] for cls in stats.tracked_classes: series = [] for snapshot in stats.snapshots: series.append(snapshot.classes.get(cls, {}).get('sum', 0)) timeseries.append((cls, series)) series = [s.overhead for s in stats.snapshots] timeseries.append(("Profiling overhead", series)) if stats.snapshots[0].system_total.data_segment: # Assume tracked data resides in the data segment series = [s.system_total.data_segment - s.tracked_total - s.overhead for s in stats.snapshots] timeseries.append(("Data segment", series)) series = [s.system_total.code_segment for s in stats.snapshots] timeseries.append(("Code segment", series)) series = [s.system_total.stack_segment for s in stats.snapshots] timeseries.append(("Stack segment", series)) series = [s.system_total.shared_segment for s in stats.snapshots] timeseries.append(("Shared memory", series)) else: series = [s.total - s.tracked_total - s.overhead for s in stats.snapshots] timeseries.append(("Other", series)) return dict(snapshots=stats.snapshots, timeseries=timeseries) else: return dict(snapshots=[])
def tracker_index()
Get tracker overview.
2.991062
2.942561
1.016483
stats = server.stats if not stats: bottle.redirect('/tracker') stats.annotate() return dict(stats=stats, clsname=clsname)
def tracker_class(clsname)
Get class instance details.
9.136868
9.063865
1.008054
graph = _compute_garbage_graphs()[int(index)] graph.reduce_to_cycles() objects = graph.metadata objects.sort(key=lambda x: -x.size) return dict(objects=objects, index=index)
def garbage_cycle(index)
Get reference cycle details.
9.648057
9.59478
1.005553
try: rendered = graph.rendered_file except AttributeError: try: graph.render(os.path.join(server.tmpdir, filename), format='png') rendered = filename except OSError: rendered = None graph.rendered_file = rendered return rendered
def _get_graph(graph, filename)
Retrieve or render a graph.
4.859572
4.493921
1.081366
graph = _compute_garbage_graphs()[int(index)] reduce_graph = bottle.request.GET.get('reduce', '') if reduce_graph: graph = graph.reduce_to_cycles() if not graph: return None filename = 'garbage%so%s.png' % (index, reduce_graph) rendered_file = _get_graph(graph, filename) if rendered_file: bottle.send_file(rendered_file, root=server.tmpdir) else: return None
def garbage_graph(index)
Get graph representation of reference cycle.
5.932586
5.917387
1.002569
if tracker and not stats: server.stats = tracker.stats else: server.stats = stats try: server.tmpdir = mkdtemp(prefix='pympler') server.server = PymplerServer(host=host, port=port, **kwargs) bottle.debug(debug) bottle.run(server=server.server) finally: rmtree(server.tmpdir)
def start_profiler(host='localhost', port=8090, tracker=None, stats=None, debug=False, **kwargs)
Start the web server to show profiling data. The function suspends the Python application (the current thread) until the web server is stopped. The only way to stop the server is to signal the running thread, e.g. press Ctrl+C in the console. If this isn't feasible for your application, use `start_in_background` instead. During the execution of the web server, profiling data is (lazily) cached to improve performance. For example, garbage graphs are rendered when the garbage profiling data is requested and are simply retransmitted upon later requests. :param host: the host where the server shall run, default is localhost :param port: server listens on the specified port, default is 8090 to allow coexistence with common web applications :param tracker: `ClassTracker` instance, browse profiling data (on-line analysis) :param stats: `Stats` instance, analyze `ClassTracker` profiling dumps (useful for off-line analysis)
3.714391
4.425922
0.839236
k = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) try: # should check that it's valid? How? return _winreg.QueryValueEx( k, name )[0] finally: _winreg.CloseKey( k )
def _winreg_getShellFolder( name )
Get a shell folder by string name from the registry
2.572256
2.595421
0.991075
if shell: # on Win32 and have Win32all extensions, best-case return shell_getShellFolder(shellcon.CSIDL_APPDATA) if _winreg: # on Win32, but no Win32 shell com available, this uses # a direct registry access, likely to fail on Win98/Me return _winreg_getShellFolder( 'AppData' ) # okay, what if for some reason _winreg is missing? would we want to allow ctypes? ## default case, look for name in environ... for name in ['APPDATA', 'HOME']: if name in os.environ: return os.path.join( os.environ[name], '.config' ) # well, someone's being naughty, see if we can get ~ to expand to a directory... possible = os.path.abspath(os.path.expanduser( '~/.config' )) if os.path.exists( possible ): return possible raise OSError( )
def appdatadirectory( )
Attempt to retrieve the current user's app-data directory This is the location where application-specific files should be stored. On *nix systems, this will be the ${HOME}/.config directory. On Win32 systems, it will be the "Application Data" directory. Note that for Win32 systems it is normal to create a sub-directory for storing data in the Application Data directory.
10.594279
10.29398
1.029172
gc.collect() # Do not initialize local variables before calling gc.get_objects or those # will be included in the list. Furthermore, ignore frame objects to # prevent reference cycles. tmp = gc.get_objects() tmp = [o for o in tmp if not isframe(o)] res = [] for o in tmp: # gc.get_objects returns only container objects, but we also want # the objects referenced by them refs = get_referents(o) for ref in refs: if not _is_containerobject(ref): # we already got the container objects, now we only add # non-container objects res.append(ref) res.extend(tmp) if remove_dups: res = _remove_duplicates(res) if include_frames: for sf in stack()[2:]: res.append(sf[0]) return res
def get_objects(remove_dups=True, include_frames=False)
Return a list of all known objects excluding frame objects. If (outer) frame objects shall be included, pass `include_frames=True`. In order to prevent building reference cycles, the current frame object (of the caller of get_objects) is ignored. This will not prevent creating reference cycles if the object list is passed up the call-stack. Therefore, frame objects are not included by default. Keyword arguments: remove_dups -- if True, all duplicate objects will be removed. include_frames -- if True, includes frame objects.
5.258598
5.23266
1.004957
res = 0 for o in objects: try: res += _getsizeof(o) except AttributeError: print("IGNORING: type=%s; o=%s" % (str(type(o)), str(o))) return res
def get_size(objects)
Compute the total size of all elements in objects.
4.562701
3.936879
1.158964
res = {'+': [], '-': []} def partition(objects): res = {} for o in objects: t = type(o) if type(o) not in res: res[t] = [] res[t].append(o) return res def get_not_included(foo, bar): res = [] for o in foo: if not compat.object_in_list(type(o), bar): res.append(o) elif not compat.object_in_list(o, bar[type(o)]): res.append(o) return res # Create partitions of both lists. This will reduce the time required for # the comparison left_objects = partition(left) right_objects = partition(right) # and then do the diff res['+'] = get_not_included(right, left_objects) res['-'] = get_not_included(left, right_objects) return res
def get_diff(left, right)
Get the difference of both lists. The result will be a dict with this form {'+': [], '-': []}. Items listed in '+' exist only in the right list, items listed in '-' exist only in the left list.
3.366611
3.19982
1.052125
res = [] if min > max: raise ValueError("minimum must be smaller than maximum") if Type is not None: res = [o for o in objects if isinstance(o, Type)] if min > -1: res = [o for o in res if _getsizeof(o) < min] if max > -1: res = [o for o in res if _getsizeof(o) > max] return res
def filter(objects, Type=None, min=-1, max=-1)
Filter objects. The filter can be by type, minimum size, and/or maximum size. Keyword arguments: Type -- object type to filter by min -- minimum object size max -- maximum object size
2.481866
2.605587
0.952517
res = gc.get_referents(object) level -= 1 if level > 0: for o in res: res.extend(get_referents(o, level)) res = _remove_duplicates(res) return res
def get_referents(object, level=1)
Get all referents of an object up to a certain level. The referents are not returned in any specific order and do not contain duplicate objects; duplicates are removed. Keyword arguments: level -- level of indirection up to which referents are considered. This function is recursive.
2.990699
3.611742
0.828049
# The usage of a function is calculated by creating one summary of all # objects before the function is invoked and afterwards. These summaries # are compared and the diff is returned. # This function works in a 2-steps process. Before the actual function is # invoked an empty dummy function is measurement to identify the overhead # involved in the measuring process. This overhead then is subtracted from # the measurement performed on the passed function. The result reflects the # actual usage of a function call. # Also, a measurement is performed twice, allowing the adjustment to # initializing things, e.g. modules res = None def _get_summaries(function, *args): s_before = summary.summarize(get_objects()) function(*args) s_after = summary.summarize(get_objects()) return (s_before, s_after) def _get_usage(function, *args): res = [] # init before calling (s_before, s_after) = _get_summaries(function, *args) # ignore all objects used for the measurement ignore = [] if s_before != s_after: ignore.append(s_before) for row in s_before: # ignore refs from summary and frame (loop) if len(gc.get_referrers(row)) == 2: ignore.append(row) for item in row: # ignore refs from summary and frame (loop) if len(gc.get_referrers(item)) == 2: ignore.append(item) for o in ignore: s_after = summary._subtract(s_after, o) res = summary.get_diff(s_before, s_after) return summary._sweep(res) # calibrate; twice for initialization def noop(): pass offset = _get_usage(noop) offset = _get_usage(noop) # perform operation twice to handle objects possibly used in # initialisation tmp = _get_usage(function, *args) tmp = _get_usage(function, *args) tmp = summary.get_diff(offset, tmp) tmp = summary._sweep(tmp) if len(tmp) != 0: res = tmp return res
def _get_usage(function, *args)
Test if more memory is used after the function has been called. The function will be invoked twice and only the second measurement will be considered. Thus, memory used in initialisation (e.g. loading modules) will not be included in the result. The goal is to identify memory leaks caused by functions which use more and more memory. Any arguments next to the function will be passed on to the function on invocation. Note that this function is currently experimental, because it is not tested thoroughly and performs poorly.
5.386458
5.386723
0.999951
seen = {} result = [] for item in objects: marker = id(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result
def _remove_duplicates(objects)
Remove duplicate objects. Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark
2.065442
2.040937
1.012006
boolean_actions = ( _StoreConstAction, _StoreFalseAction, _StoreTrueAction ) return [action for action in actions if action.option_strings and not action.choices and not isinstance(action, _CountAction) and not isinstance(action, _HelpAction) and type(action) not in boolean_actions]
def get_optionals_without_choices(self, actions)
All actions which are: (a) Optional, but without required choices (b) Not of a "boolean" type (storeTrue, etc..) (c) Of type _AppendAction e.g. anything which has an argument style like: >>> -f myfilename.txt
5.063588
4.249176
1.191664
return [action for action in actions if isinstance(action, _StoreTrueAction) or isinstance(action, _StoreFalseAction) or isinstance(action, _StoreConstAction)]
def get_flag_style_optionals(self, actions)
Gets all instances of "flag" type options. i.e. options which either store a const, or store boolean style options (e.g. StoreTrue). Types: _StoreTrueAction _StoreFalseAction _StoreConst
4.553603
3.109981
1.46419
''' Resizes a bitmap to a height of 89 pixels (the size of the top panel), while keeping the aspect ratio intact ''' image = wx.ImageFromBitmap(_bitmap) _width, _height = image.GetSize() if _height < target_height: return wx.StaticBitmap(parent, -1, wx.BitmapFromImage(image)) ratio = float(_width) / _height image = image.Scale(target_height * ratio, target_height, wx.IMAGE_QUALITY_HIGH) return wx.StaticBitmap(parent, -1, wx.BitmapFromImage(image))
def resize_bitmap(parent, _bitmap, target_height)
Resizes a bitmap to a height of 89 pixels (the size of the top panel), while keeping the aspect ratio intact
3.427727
1.918418
1.786747
for seq2 in seqs: dist = levenshtein(seq1, seq2, max_dist=max_dist) if dist != -1: yield dist, seq2
def ilevenshtein(seq1, seqs, max_dist=-1)
Compute the Levenshtein distance between the sequence `seq1` and the series of sequences `seqs`. `seq1`: the reference sequence `seqs`: a series of sequences (can be a generator) `max_dist`: if provided and > 0, only the sequences whose distance from the reference sequence is lower than or equal to this value will be returned. The return value is a series of pairs (distance, sequence). In the C implementation, the sequence objects in `seqs` are expected to be of the same kind as the reference sequence; the same holds true for `ifast_comp`.
2.065551
2.552453
0.809241
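A short hand-checked sketch of the ilevenshtein record above (assuming the levenshtein and ilevenshtein functions from these records are importable together; the distances are worked out by hand):
# Hypothetical usage sketch of ilevenshtein(); candidates farther than max_dist are silently dropped.
candidates = ["fo", "bar", "foob"]
matches = sorted(ilevenshtein("foo", candidates, max_dist=1))
assert matches == [(1, 'fo'), (1, 'foob')]   # "bar" (distance 3) is filtered out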
for seq2 in seqs: dist = fast_comp(seq1, seq2, transpositions) if dist != -1: yield dist, seq2
def ifast_comp(seq1, seqs, transpositions=False)
Return an iterator over all the sequences in `seqs` whose distance from `seq1` is lower than or equal to 2. Sequences whose distance from the reference sequence is higher than that are dropped. `seq1`: the reference sequence. `seqs`: a series of sequences (can be a generator) `transpositions` has the same meaning as in `fast_comp`. The return value is a series of pairs (distance, sequence). You might want to call `sorted()` on the iterator to get the results in a meaningful order: >>> g = ifast_comp("foo", ["fo", "bar", "foob", "foo", "foobaz"]) >>> sorted(g) [(0, 'foo'), (1, 'fo'), (1, 'foob')]
3.070273
3.725477
0.824129
count = {} total_size = {} for o in objects: otype = _repr(o) if otype in count: count[otype] += 1 total_size[otype] += _getsizeof(o) else: count[otype] = 1 total_size[otype] = _getsizeof(o) rows = [] for otype in count: rows.append([otype, count[otype], total_size[otype]]) return rows
def summarize(objects)
Summarize a list of objects. Return a list of lists, where each row consists of:: [str(type), number of objects of this type, total size of these objects]. No guarantee regarding the order is given.
2.180146
1.914801
1.138576
res = [] for row_r in right: found = False for row_l in left: if row_r[0] == row_l[0]: res.append([row_r[0], row_r[1] - row_l[1], row_r[2] - row_l[2]]) found = True if not found: res.append(row_r) for row_l in left: found = False for row_r in right: if row_l[0] == row_r[0]: found = True if not found: res.append([row_l[0], -row_l[1], -row_l[2]]) return res
def get_diff(left, right)
Get the difference of two summaries. Subtracts the values of the left summary from the values of the right summary. If identical rows appear on both sides, they are included in the result with 0 for the number of elements and the total size. If the number of elements of a row of the diff is 0, but the total size is not, it means that the objects likely have changed, but not their number, thus resulting in a changed size.
1.565744
1.599548
0.978867
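To illustrate the summary-level get_diff record above, here is a sketch on two hand-built summary rows of the form [type, count, total size] (assuming the function from this record is in scope; plain strings stand in for the type keys):
# Hypothetical sketch of summary.get_diff().
left  = [['str', 10, 100]]
right = [['str', 12, 120], ['int', 1, 24]]
diff = get_diff(left, right)
assert ['str', 2, 20] in diff    # 2 more strings, 20 more bytes on the right side
assert ['int', 1, 24] in diff    # ints appear only on the right side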
localrows = [] for row in rows: localrows.append(list(row)) # input validation sortby = ['type', '#', 'size'] if sort not in sortby: raise ValueError("invalid sort, should be one of" + str(sortby)) orders = ['ascending', 'descending'] if order not in orders: raise ValueError("invalid order, should be one of" + str(orders)) # sort rows if sortby.index(sort) == 0: if order == "ascending": localrows.sort(key=lambda x: _repr(x[0])) elif order == "descending": localrows.sort(key=lambda x: _repr(x[0]), reverse=True) else: if order == "ascending": localrows.sort(key=lambda x: x[sortby.index(sort)]) elif order == "descending": localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True) # limit rows localrows = localrows[0:limit] for row in localrows: row[2] = stringutils.pp(row[2]) # print rows localrows.insert(0,["types", "# objects", "total size"]) _print_table(localrows)
def print_(rows, limit=15, sort='size', order='descending')
Print the rows as a summary. Keyword arguments: limit -- the maximum number of elements to be listed sort -- sort elements by 'size', 'type', or '#' order -- sort 'ascending' or 'descending'
2.316462
2.40894
0.96161
border = "=" # vertical delimiter vdelim = " | " # padding nr. of spaces are left around the longest element in the # column padding = 1 # may be left,center,right justify = 'right' justify = {'left' : str.ljust, 'center' : str.center, 'right' : str.rjust}[justify.lower()] # calculate column widths (longest item in each col # plus "padding" nr of spaces on both sides) cols = zip(*rows) colWidths = [max([len(str(item))+2*padding for item in col]) for col in cols] borderline = vdelim.join([w*border for w in colWidths]) for row in rows: print(vdelim.join([justify(str(item),width) for (item,width) in zip(row,colWidths)])) if header: print(borderline) header=False
def _print_table(rows, header=True)
Print a list of lists as a pretty table. Keyword arguments: header -- if True the first row is treated as a table header inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662
4.585232
4.495304
1.020005
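A brief sketch of the _print_table record above (assuming the function from this record is in scope; the rows here are made up for illustration):
# Hypothetical usage sketch of _print_table(); every column is right-justified to its widest cell
# and a line of '=' is printed right after the first (header) row.
rows = [['types', '# objects', 'total size'],
        ['str', 10, '1.2 KB'],
        ['dict', 3, '840 B']]
_print_table(rows)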
res = "" t = type(o) if (verbosity == 0) or (t not in representations): res = str(t) else: verbosity -= 1 if len(representations[t]) < verbosity: verbosity = len(representations[t]) - 1 res = representations[t][verbosity](o) res = address.sub('', res) res = type_prefix.sub('', res) res = type_suffix.sub('', res) return res
def _repr(o, verbosity=1)
Get a meaningful object representation. This function should be used when the simple str(o) output would result in too general data. E.g. "<type 'instance'>" is less meaningful than "instance: Foo". Keyword arguments: verbosity -- level of detail of the representation; 0 falls back to the plain type name
3.737111
4.392529
0.850788
function(summary, *args) for row in summary: function(row, *args) for item in row: function(item, *args)
def _traverse(summary, function, *args)
Traverse all objects of a summary and call function with each as a parameter. Using this function, the following objects will be traversed: - the summary - each row - each item of a row
3.253129
3.169893
1.026258
found = False row = [_repr(o), 1, _getsizeof(o)] for r in summary: if r[0] == row[0]: (r[1], r[2]) = (r[1] - row[1], r[2] - row[2]) found = True if not found: summary.append([row[0], -row[1], -row[2]]) return summary
def _subtract(summary, o)
Remove the object o from the summary by subtracting its size.
2.982005
2.69104
1.108124
#http://stackoverflow.com/questions/1401527/how-do-i-programmatically-check-whether-an-image-png-jpeg-or-gif-is-corrupted/1401565#1401565 # Check structure only for images (not supported for other types currently) if filepath.lower().endswith(tuple(img_filter)): try: #try: im = PIL.Image.open(filepath) #except IOError: # File format not supported by PIL, we skip the check_structure - ARG this is also raised if a supported image file is corrupted... #print("File: %s: DETECTNOPE" % filepath) #return None im.verify() # If an error occurred, the structure is corrupted except Exception as e: return str(e) # Else no exception, there's no corruption return False # Else the format does not currently support structure checking, we just return None to signal we didin't check else: return None
def check_structure(filepath)
Returns False if the file is okay, None if file format is unsupported by PIL/PILLOW, or returns an error string if the file is corrupt.
8.13451
7.35152
1.106507
'''Generate several hashes (md5 and sha1) in a single sweep of the file. Using two hashes lowers the probability of collision and false negative (file modified but the hash is the same). Supports big files by streaming blocks by blocks to the hasher automatically. Blocksize can be any multiple of 128.''' # Init hashers hasher_md5 = hashlib.md5() hasher_sha1 = hashlib.sha1() # Read the file blocks by blocks with open(filepath, 'rb') as afile: buf = afile.read(blocksize) while len(buf) > 0: # Compute both hashes at the same time hasher_md5.update(buf) hasher_sha1.update(buf) # Load the next data block from file buf = afile.read(blocksize) return (hasher_md5.hexdigest(), hasher_sha1.hexdigest())
def generate_hashes(filepath, blocksize=65536)
Generate several hashes (md5 and sha1) in a single sweep of the file. Using two hashes lowers the probability of collision and false negative (file modified but the hash is the same). Supports big files by streaming blocks by blocks to the hasher automatically. Blocksize can be any multiple of 128.
4.117771
1.786439
2.305016
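A small usage sketch of the generate_hashes record above, cross-checked against hashlib directly (assuming the function from this record is importable; the temporary file and its contents are made up for illustration):
# Hypothetical usage sketch of generate_hashes().
import hashlib, os, tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello world")
    path = f.name
md5_hex, sha1_hex = generate_hashes(path)
assert md5_hex == hashlib.md5(b"hello world").hexdigest()
assert sha1_hex == hashlib.sha1(b"hello world").hexdigest()
os.remove(path)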
'''Returns the degree of the polynomial''' if not poly: return self.degree #return len(self.coefficients) - 1 elif poly and hasattr(poly, "coefficients"): return len(poly.coefficients) - 1 else: while poly and poly[-1] == 0: poly.pop() # normalize return len(poly)-1
def get_degree(self, poly=None)
Returns the degree of the polynomial
3.955689
3.98949
0.991528
'''Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)''' if k > (self.degree + other.degree) or k > self.degree: return 0 # optimization: if the required coefficient is above the maximum coefficient of the resulting polynomial, we can already predict that and just return 0 term = 0 for i in _range(min(len(self), len(other))): coef1 = self.coefficients[-(k-i+1)] coef2 = other.coefficients[-(i+1)] if coef1 == 0 or coef2 == 0: continue # log(0) is undefined, skip (and in addition it's a nice optimization) term += coef1 * coef2 return term
def mul_at(self, other, k)
Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)
6.697862
4.242807
1.578639
'''Multiply a polynomial with a scalar''' return self.__class__([self.coefficients[i] * scalar for i in _range(len(self))])
def scale(self, scalar)
Multiply a polynomial with a scalar
6.642437
5.605564
1.184972
'''Fast polynomial division by using Extended Synthetic Division (aka Horner's method). Also works with non-monic polynomials. A nearly exact same code is explained greatly here: http://research.swtch.com/field and you can also check the Wikipedia article and the Khan Academy video.''' # Note: for RS encoding, you should supply divisor = mprime (not m, you need the padded message) msg_out = list(dividend) # Copy the dividend normalizer = divisor[0] # precomputing for performance for i in _range(len(dividend)-(len(divisor)-1)): msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]. For more infos, see http://en.wikipedia.org/wiki/Synthetic_division coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization) for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient if divisor[j] != 0: # log(0) is undefined so we need to avoid that case msg_out[i + j] += -divisor[j] * coef # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:])
def _fastdivmod(dividend, divisor)
Fast polynomial division by using Extended Synthetic Division (aka Horner's method). Also works with non-monic polynomials. A nearly exact same code is explained greatly here: http://research.swtch.com/field and you can also check the Wikipedia article and the Khan Academy video.
10.32494
6.73611
1.532775
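Since the _fastdivmod record above is dense, here is a standalone sketch of the same extended synthetic division over plain coefficient lists (highest-degree coefficient first); synth_divmod is an illustrative name, not from the library, and the real method operates on Polynomial coefficient objects rather than floats:
# Minimal sketch of extended synthetic division, assuming plain numeric coefficients.
def synth_divmod(dividend, divisor):
    out = list(dividend)                    # copy the dividend; will hold quotient + remainder
    normalizer = divisor[0]                 # leading coefficient of the divisor
    for i in range(len(dividend) - (len(divisor) - 1)):
        out[i] /= float(normalizer)         # normalize for non-monic divisors
        coef = out[i]
        if coef != 0:
            for j in range(1, len(divisor)):
                out[i + j] += -divisor[j] * coef
    sep = -(len(divisor) - 1)               # split point between quotient and remainder
    return out[:sep], out[sep:]

# (x^2 + 2x + 1) / (x + 1) -> quotient x + 1, remainder 0
quot, rem = synth_divmod([1, 2, 1], [1, 1])
assert quot == [1.0, 1.0] and rem == [0.0]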
'''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (so it is not generic, must be used with GF2int). Transposed from the reedsolomon library: https://github.com/tomerfiliba/reedsolomon BEWARE: it works only for monic divisor polynomial! (which is always the case with Reed-Solomon's generator polynomials)''' msg_out = list(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed for i in _range(len(dividend)-(len(divisor)-1)): coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization) for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic) #if divisor[j] != 0: # log(0) is undefined so we need to check that, but it slow things down in fact and it's useless in our case (reed-solomon encoding) since we know that all coefficients in the generator are not 0 msg_out[i + j] ^= divisor[j] * coef # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef # Note: we could speed things up a bit if we could inline the table lookups, but the Polynomial class is generic, it doesn't know anything about the underlying fields and their operators. Good OOP design, bad for performances in Python because of function calls and the optimizations we can't do (such as precomputing gf_exp[divisor]). That's what is done in reedsolo lib, this is one of the reasons it is faster. # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:])
def _gffastdivmod(dividend, divisor)
Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (so it is not generic, must be used with GF2int). Transposed from the reedsolomon library: https://github.com/tomerfiliba/reedsolomon BEWARE: it works only for monic divisor polynomial! (which is always the case with Reed-Solomon's generator polynomials)
13.376098
8.355291
1.600913
'''Evaluate this polynomial at value x, returning the result (which is the sum of all evaluations at each term).''' # Holds the sum over each term in the polynomial #c = 0 # Holds the current power of x. This is multiplied by x after each term # in the polynomial is added up. Initialized to x^0 = 1 #p = 1 #for term in self.coefficients[::-1]: # c = c + term * p # p = p * x #return c # Faster alternative using Horner's Scheme y = self[0] for i in _range(1, len(self)): y = y * x + self.coefficients[i] return y
def evaluate(self, x)
Evaluate this polynomial at value x, returning the result (which is the sum of all evaluations at each term).
5.75845
4.342017
1.326215
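The evaluate record above uses Horner's scheme; a minimal standalone sketch on plain integers may make the loop clearer (the list and value here are made up for illustration, not taken from the library):
# Horner evaluation sketch: 2x^2 + 3x + 4 at x = 5, coefficients highest degree first.
coeffs = [2, 3, 4]
x = 5
y = coeffs[0]
for c in coeffs[1:]:
    y = y * x + c        # ((2*5) + 3)*5 + 4
assert y == 69           # 2*25 + 3*5 + 4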
'''Simple way of evaluating a polynomial at value x, but here we return both the full array (evaluated at each polynomial position) and the sum''' x_gf = self.coefficients[0].__class__(x) arr = [self.coefficients[-i]*x_gf**(i-1) for i in _range(len(self), 0, -1)] # if x == 1: arr = sum(self.coefficients) return arr, sum(arr)
def evaluate_array(self, x)
Simple way of evaluating a polynomial at value x, but here we return both the full array (evaluated at each polynomial position) and the sum
8.648911
4.261912
2.02935
'''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))''' #res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed #for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again #res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.) #return Polynomial(res) # One liner way to do it (also a bit faster too) #return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] ) # Another faster version L = len(self)-1 return Polynomial( [(L-i) * self[i] for i in _range(0, len(self)-1)] )
def derive(self)
Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))
6.202035
5.095854
1.217075
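A standalone sketch of the same formal derivative used in the derive record above, on a plain coefficient list (highest degree first); the list is illustrative and the real method returns a Polynomial:
# Formal derivative sketch: 3x^2 + 2x + 5 -> 6x + 2.
coeffs = [3, 2, 5]
L = len(coeffs) - 1
derived = [(L - i) * coeffs[i] for i in range(len(coeffs) - 1)]
assert derived == [6, 2]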
'''Generalized feature scaling (useful for variable error correction rate calculation)''' return a + float(x - xmin) * (b - a) / (xmax - xmin)
def feature_scaling(x, xmin, xmax, a=0, b=1)
Generalized feature scaling (useful for variable error correction rate calculation)
12.083534
3.587722
3.368024
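A brief hand-checked sketch of the feature_scaling record above, which linearly maps x from [xmin, xmax] onto [a, b] (assuming the function from this record is in scope; the sample values are made up):
# Hypothetical usage sketch of feature_scaling().
for x, expected in [(0, 0.2), (5, 0.5), (10, 0.8)]:
    assert abs(feature_scaling(x, 0, 10, a=0.2, b=0.8) - expected) < 1e-9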
'''From an ecc entry position (a list with starting and ending positions), extract the metadata fields (filename, filesize, ecc for both), and the starting/ending positions of the ecc stream (containing variably encoded blocks of hash and ecc per blocks of the original file's header)''' # Read the the beginning of the ecc entry blocksize = 65535 file.seek(entry_pos[0]) entry = file.read(blocksize) entry = entry.lstrip(field_delim) # if there was some slight adjustment error (example: the last ecc block of the last file was the field_delim, then we will start with a field_delim, and thus we need to remove the trailing field_delim which is useless and will make the field detection buggy). This is not really a big problem for the previous file's ecc block: the missing ecc characters (which were mistaken for a field_delim), will just be missing (so we will lose a bit of resiliency for the last block of the previous file, but that's not a huge issue, the correction can still rely on the other characters). # TODO: do in a while loop in case the filename is really big (bigger than blocksize) - or in case we add intra-ecc for filename # Find metadata fields delimiters positions # TODO: automate this part, just give in argument the number of field_delim to find, and the func will find the x field_delims (the number needs to be specified in argument because the field_delim can maybe be found wrongly inside the ecc stream, which we don't want) first = entry.find(field_delim) second = entry.find(field_delim, first+len(field_delim)) third = entry.find(field_delim, second+len(field_delim)) fourth = entry.find(field_delim, third+len(field_delim)) # Note: we do not try to find all the field delimiters because we optimize here: we just walk the string to find the exact number of field_delim we are looking for, and after we stop, no need to walk through the whole string. # Extract the content of the fields # Metadata fields relfilepath = entry[:first] filesize = entry[first+len(field_delim):second] relfilepath_ecc = entry[second+len(field_delim):third] filesize_ecc = entry[third+len(field_delim):fourth] # Ecc stream field (aka ecc blocks) ecc_field_pos = [entry_pos[0]+fourth+len(field_delim), entry_pos[1]] # return the starting and ending position of the rest of the ecc track, which contains blocks of hash/ecc of the original file's content. # Place the cursor at the beginning of the ecc_field file.seek(ecc_field_pos[0]) # Try to convert to an int, an error may happen try: filesize = int(filesize) except Exception, e: print("Exception when trying to detect the filesize in ecc field (it may be corrupted), skipping: ") print(e) #filesize = 0 # avoid setting to 0, we keep as an int so that we can try to fix using intra-ecc # entries = [ {"message":, "ecc":, "hash":}, etc.] return {"relfilepath": relfilepath, "relfilepath_ecc": relfilepath_ecc, "filesize": filesize, "filesize_ecc": filesize_ecc, "ecc_field_pos": ecc_field_pos}
def entry_fields(file, entry_pos, field_delim="\xFF")
From an ecc entry position (a list with starting and ending positions), extract the metadata fields (filename, filesize, ecc for both), and the starting/ending positions of the ecc stream (containing variably encoded blocks of hash and ecc per blocks of the original file's header)
7.884447
6.129708
1.286268
'''From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.''' # Cut the header and the ecc entry into blocks, and then assemble them so that we can easily process block by block eccfile.seek(entry_fields["ecc_field_pos"][0]) curpos = file.tell() ecc_curpos = eccfile.tell() while (ecc_curpos < entry_fields["ecc_field_pos"][1]): # continue reading the input file until we reach the position of the previously detected ending marker # Compute the current rate, depending on where we are inside the input file (headers? later stage?) if curpos < header_size or constantmode: # header stage: constant rate rate = resilience_rates[0] else: # later stage 2 or 3: progressive rate rate = feature_scaling(curpos, header_size, entry_fields["filesize"], resilience_rates[1], resilience_rates[2]) # find the rate for the current stream of data (interpolate between stage 2 and stage 3 rates depending on the cursor position in the file) # From the rate, compute the ecc parameters ecc_params = compute_ecc_params(max_block_size, rate, hasher) # Extract the message block from input file, given the computed ecc parameters mes = file.read(ecc_params["message_size"]) if len(mes) == 0: return # quit if message is empty (reached end-of-file), this is a safeguard if ecc pos ending was miscalculated (we thus only need the starting position to be correct) buf = eccfile.read(ecc_params["hash_size"]+ecc_params["ecc_size"]) hash = buf[:ecc_params["hash_size"]] ecc = buf[ecc_params["hash_size"]:] yield {"message": mes, "hash": hash, "ecc": ecc, "rate": rate, "ecc_params": ecc_params, "curpos": curpos, "ecc_curpos": ecc_curpos} # Prepare for the next iteration of the loop curpos = file.tell() ecc_curpos = eccfile.tell() # Just a quick safe guard against ecc ending marker misdetection file.seek(0, os.SEEK_END) # alternative way of finding the total size: go to the end of the file size = file.tell() if curpos < size: print("WARNING: end of ecc track reached but not the end of file! Either the ecc ending marker was misdetected, or either the file hash changed! Some blocks maybe may not have been properly checked!")
def stream_entry_assemble(hasher, file, eccfile, entry_fields, max_block_size, header_size, resilience_rates, constantmode=False)
From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.
6.503183
5.485446
1.185534
'''Generate a stream of hash/ecc blocks, of variable encoding rate and size, given a file.''' curpos = file.tell() # init the reading cursor at the beginning of the file # Find the total size to know when to stop #size = os.fstat(file.fileno()).st_size # old way of doing it, doesn't work with StringIO objects file.seek(0, os.SEEK_END) # alternative way of finding the total size: go to the end of the file size = file.tell() file.seek(0, curpos) # place the reading cursor back at the beginning of the file # Main encoding loop while curpos < size: # Continue encoding while we do not reach the end of the file # Calculating the encoding rate if curpos < header_size: # if we are still reading the file's header, we use a constant rate rate = resilience_rates[0] else: # else we use a progressive rate for the rest of the file the we calculate on-the-fly depending on our current reading cursor position in the file rate = feature_scaling(curpos, header_size, size, resilience_rates[1], resilience_rates[2]) # find the rate for the current stream of data (interpolate between stage 2 and stage 3 rates depending on the cursor position in the file) # Compute the ecc parameters given the calculated rate ecc_params = compute_ecc_params(max_block_size, rate, hasher) #ecc_manager = ECCMan(max_block_size, ecc_params["message_size"]) # not necessary to create an ecc manager anymore, as it is very costly. Now we can specify a value for k on the fly (tables for all possible values of k are pre-generated in the reed-solomon libraries) # Compute the ecc and hash for the current message block mes = file.read(ecc_params["message_size"]) hash = hasher.hash(mes) ecc = ecc_manager.encode(mes, k=ecc_params["message_size"]) #print("mes %i (%i) - ecc %i (%i) - hash %i (%i)" % (len(mes), message_size, len(ecc), ecc_params["ecc_size"], len(hash), ecc_params["hash_size"])) # DEBUGLINE # Return the result yield [hash, ecc, ecc_params] # Prepare for next iteration curpos = file.tell()
def stream_compute_ecc_hash(ecc_manager, hasher, file, max_block_size, header_size, resilience_rates)
Generate a stream of hash/ecc blocks, of variable encoding rate and size, given a file.
5.367333
4.861855
1.103968
'''Generate a concatenated string of ecc stream of hash/ecc blocks, of constant encoding rate, given a string. NOTE: resilience_rate here is constant, you need to supply only one rate, between 0.0 and 1.0. The encoding rate will then be constant, like in header_ecc.py.''' fpfile = StringIO(string) ecc_stream = ''.join( [str(x[1]) for x in stream_compute_ecc_hash(ecc_manager, hasher, fpfile, max_block_size, len(string), [resilience_rate])] ) # "hack" the function by tricking it to always use a constant rate, by setting the header_size=len(relfilepath), and supplying the resilience_rate_intra instead of resilience_rate_s1 (the one for header) return ecc_stream
def compute_ecc_hash_from_string(string, ecc_manager, hasher, max_block_size, resilience_rate)
Generate a concatenated string of ecc stream of hash/ecc blocks, of constant encoding rate, given a string. NOTE: resilience_rate here is constant, you need to supply only one rate, between 0.0 and 1.0. The encoding rate will then be constant, like in header_ecc.py.
13.277224
4.778378
2.778605
# convert strings to StringIO object so that we can trick our ecc reading functions that normally works only on files fpfile = StringIO(field) fpfile_ecc = StringIO(ecc) fpentry_p = {"ecc_field_pos": [0, len(field)]} # create a fake entry_pos so that the ecc reading function works correctly # Prepare variables field_correct = [] # will store each block of the corrected (or already correct) filepath fcorrupted = False # check if field was corrupted fcorrected = True # check if field was corrected (if it was corrupted) errmsg = '' # Decode each block of the filepath for e in stream_entry_assemble(hasher_intra, fpfile, fpfile_ecc, fpentry_p, max_block_size, len(field), [resilience_rate_intra], constantmode=True): # Check if this block of the filepath is OK, if yes then we just copy it over if ecc_manager_intra.check(e["message"], e["ecc"]): field_correct.append(e["message"]) else: # Else this block is corrupted, we will try to fix it using the ecc fcorrupted = True # Repair the message block and the ecc try: repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures) except (ReedSolomonError, RSCodecError), exc: # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files. repaired_block = None repaired_ecc = None errmsg += "- Error: metadata field at offset %i: %s\n" % (entry_pos[0], exc) # Check if the block was successfully repaired: if yes then we copy the repaired block... if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc): field_correct.append(repaired_block) else: # ... else it failed, then we copy the original corrupted block and report an error later field_correct.append(e["message"]) fcorrected = False # Join all the blocks into one string to build the final filepath if isinstance(field_correct[0], bytearray): field_correct = [str(x) for x in field_correct] # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str field = ''.join(field_correct) # Report errors return (field, fcorrupted, fcorrected, errmsg)
def ecc_correct_intra_stream(ecc_manager_intra, ecc_params_intra, hasher_intra, resilience_rate_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False, max_block_size=65535)
Correct an intra-field with its corresponding intra-ecc if necessary
5.402874
5.321995
1.015197
ret = [] if timeout is not None: max_iter = int(timeout / interval) # number of samples to take (must be an integer for range() below) elif isinstance(proc, int): # external process and no timeout max_iter = 1 else: # for a Python function wait until it finishes max_iter = float('inf') if str(proc).endswith('.py'): filename = _find_script(proc) with open(filename) as f: proc = f.read() raise NotImplementedError if isinstance(proc, (list, tuple)): if len(proc) == 1: f, args, kw = (proc[0], (), {}) elif len(proc) == 2: f, args, kw = (proc[0], proc[1], {}) elif len(proc) == 3: f, args, kw = (proc[0], proc[1], proc[2]) else: raise ValueError try: import multiprocessing except ImportError: print ('WARNING: cannot import module `multiprocessing`. Forcing to' ' run inplace.') # force inplace run_in_place = True if run_in_place: import threading main_thread = threading.Thread(target=f, args=args, kwargs=kw) else: main_thread = multiprocessing.Process(target=f, args=args, kwargs=kw) i = 0 main_thread.start() pid = getattr(main_thread, 'pid', os.getpid()) while i < max_iter and main_thread.is_alive(): m = _get_memory(pid) ret.append(m) time.sleep(interval) i += 1 main_thread.join() else: # external process if proc == -1: proc = os.getpid() if max_iter == -1: max_iter = 1 for _ in range(max_iter): ret.append(_get_memory(proc)) time.sleep(interval) return ret
def memory_usage(proc=-1, interval=.1, timeout=None, run_in_place=False)
Return the memory usage of a process or piece of code Parameters ---------- proc : {int, string, tuple}, optional The process to monitor. Can be given by a PID, by a string containing a filename or by a tuple. The tuple should contain three values (f, args, kw) specifies to run the function f(*args, **kw). Set to -1 (default) for current process. interval : float, optional timeout : float, optional run_in_place : boolean, optional. False by default If False fork the process and retrieve timings from a different process. You shouldn't need to change this unless you are affected by this (http://blog.vene.ro/2012/07/04/on-why-my-memit-fails-on-osx) bug. Returns ------- mm : list of integers, size less than num memory usage, in KB
2.647977
2.597213
1.019546
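A short usage sketch of memory_usage as defined above; the workload function is hypothetical and the returned values depend on the machine:

def _workload():
    # allocate a throwaway list so the sampler has something to observe
    data = [0] * (10 ** 6)
    return len(data)

# run _workload in a separate process and sample its memory every 0.1 s until it finishes
samples = memory_usage(proc=(_workload, (), {}), interval=0.1)
# take a single sample of the current process (proc=-1, no timeout)
current = memory_usage(proc=-1)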
if os.path.isfile(script_name): return script_name path = os.getenv('PATH', os.defpath).split(os.pathsep) for dir in path: if dir == '': continue fn = os.path.join(dir, script_name) if os.path.isfile(fn): return fn print >> sys.stderr, 'Could not find script {0}'.format(script_name) raise SystemExit(1)
def _find_script(script_name)
Find the script. If the input is not a file, then $PATH will be searched.
2.070714
2.013021
1.02866
opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False) repeat = int(getattr(opts, 'r', 1)) if repeat < 1: repeat = 1 timeout = int(getattr(opts, 't', 0)) if timeout <= 0: timeout = None run_in_place = hasattr(opts, 'i') mem_usage = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout, run_in_place=run_in_place) if mem_usage: print('maximum of %d: %f MB per loop' % (repeat, max(mem_usage))) else: print('ERROR: could not read memory usage, try with a lower interval or more iterations')
def magic_memit(self, line='')
Measure memory usage of a Python statement Usage, in line mode: %memit [-ir<R>t<T>] statement Options: -r<R>: repeat the loop iteration <R> times and take the best result. Default: 1 -i: run the code in the current environment, without forking a new process. This is required on some MacOS versions of Accelerate if your line contains a call to `np.dot`. -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None Examples -------- :: In [1]: import numpy as np In [2]: %memit np.zeros(1e7) maximum of 1: 76.402344 MB per loop In [3]: %memit np.ones(1e6) maximum of 1: 7.820312 MB per loop In [4]: %memit -r 10 np.empty(1e8) maximum of 10: 0.101562 MB per loop In [5]: memit -t 3 while True: pass; Subprocess timed out. Subprocess timed out. Subprocess timed out. ERROR: all subprocesses exited unsuccessfully. Try again with the `-i` option. maximum of 1: -inf MB per loop
5.410284
4.685875
1.154594
try: # func_code does not exist in Python3 code = func.__code__ except AttributeError: import warnings warnings.warn("Could not extract a code object for the object %r" % (func,)) return if code not in self.code_map: self.code_map[code] = {} self.functions.append(func)
def add_function(self, func)
Record line profiling information for the given Python function.
4.448222
4.438947
1.002089
def f(*args, **kwds): self.enable_by_count() try: result = func(*args, **kwds) finally: self.disable_by_count() return result return f
def wrap_function(self, func)
Wrap a function to profile it.
3.322263
2.690369
1.234873
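wrap_function above is the usual enable/disable-around-call decorator idiom. A self-contained miniature of the same pattern (toy class, purely illustrative, not the profiler used in this corpus):

class _ToyProfiler(object):
    def __init__(self):
        self.enable_count = 0
    def enable_by_count(self):
        self.enable_count += 1
    def disable_by_count(self):
        self.enable_count -= 1
    def wrap_function(self, func):
        def f(*args, **kwds):
            self.enable_by_count()
            try:
                return func(*args, **kwds)
            finally:
                self.disable_by_count()
        return f

prof = _ToyProfiler()
wrapped = prof.wrap_function(sum)
assert wrapped(range(10)) == 45
assert prof.enable_count == 0  # enable/disable stayed balanced around the call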
self.enable_by_count() try: exec(cmd, globals, locals) finally: self.disable_by_count() return self
def runctx(self, cmd, globals, locals)
Profile a single executable statement in the given namespaces.
4.211448
3.779657
1.114241
# XXX where is this used ? can be removed ? self.enable_by_count() try: return func(*args, **kw) finally: self.disable_by_count()
def runcall(self, func, *args, **kw)
Profile a single function call.
5.765461
4.86354
1.185445
if self.enable_count > 0: self.enable_count -= 1 if self.enable_count == 0: self.disable()
def disable_by_count(self)
Disable the profiler if the number of disable requests matches the number of enable requests.
2.83458
2.723504
1.040784
if event in ('line', 'return') and frame.f_code in self.code_map: lineno = frame.f_lineno if event == 'return': lineno += 1 entry = self.code_map[frame.f_code].setdefault(lineno, []) entry.append(_get_memory(os.getpid())) return self.trace_memory_usage
def trace_memory_usage(self, frame, event, arg)
Callback for sys.settrace
3.401297
3.347415
1.016097
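For context, a callback like trace_memory_usage is installed with sys.settrace; a minimal sketch of wiring it around a single call (profiler stands for an instance of the class above, the helper name is made up):

import sys

def run_with_memory_trace(profiler, func, *args, **kwargs):
    # install the line-level callback, run the target, then always remove the trace
    sys.settrace(profiler.trace_memory_usage)
    try:
        return func(*args, **kwargs)
    finally:
        sys.settrace(None)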
''' Parse a makefile to find commands and substitute variables. Expects a makefile with only aliases and a line return between each command. Returns a dict, with a list of commands for each alias. ''' # -- Parsing the Makefile using ConfigParser # Adding a fake section to make the Makefile a valid Ini file ini_str = '[root]\n' with open(filepath, 'r') as fd: ini_str = ini_str + fd.read().replace('@make ', '') ini_fp = StringIO.StringIO(ini_str) # Parse using ConfigParser config = ConfigParser.RawConfigParser() config.readfp(ini_fp) # Fetch the list of aliases aliases = config.options('root') # -- Extracting commands for each alias commands = {} for alias in aliases: # strip the first line return, and then split by any line return commands[alias] = config.get('root', alias).lstrip('\n').split('\n') # -- Commands substitution # Loop until all aliases are substituted by their commands: # Check each command of each alias, and if there is one command that is to # be substituted by an alias, try to do it right away. If this is not # possible because this alias itself points to other aliases , then stop # and put the current alias back in the queue to be processed again later. # Create the queue of aliases to process aliases_todo = commands.keys() # Create the dict that will hold the full commands commands_new = {} # Loop until we have processed all aliases while aliases_todo: # Pick the first alias in the queue alias = aliases_todo.pop(0) # Create a new entry in the resulting dict commands_new[alias] = [] # For each command of this alias for cmd in commands[alias]: # Ignore self-referencing (alias points to itself) if cmd == alias: pass # Substitute full command elif cmd in aliases and cmd in commands_new: # Append all the commands referenced by the alias commands_new[alias].extend(commands_new[cmd]) # Delay substituting another alias, waiting for the other alias to # be substituted first elif cmd in aliases and cmd not in commands_new: # Delete the current entry to avoid other aliases # to reference this one wrongly (as it is empty) del commands_new[alias] aliases_todo.append(alias) break # Full command (no aliases) else: commands_new[alias].append(cmd) commands = commands_new del commands_new # -- Prepending prefix to avoid conflicts with standard setup.py commands # for alias in commands.keys(): # commands['make_'+alias] = commands[alias] # del commands[alias] return commands
def parse_makefile_aliases(filepath)
Parse a makefile to find commands and substitute variables. Expects a makefile with only aliases and a line return between each command. Returns a dict, with a list of commands for each alias.
4.982643
4.274975
1.165537
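An illustration of the alias substitution above, using a made-up Makefile of the alias-only form the parser expects (the file name and content are hypothetical; this relies on the same Python 2 ConfigParser/StringIO modules as the function itself):

makefile_text = (
    "clean:\n"
    "\trm -rf build\n"
    "test:\n"
    "\t@make clean\n"
    "\tnosetests\n"
)
with open('Makefile.example', 'w') as fd:
    fd.write(makefile_text)
print(parse_makefile_aliases('Makefile.example'))
# expected (key order may differ): {'clean': ['rm -rf build'], 'test': ['rm -rf build', 'nosetests']}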
self.__run_backup = self.run self.run = self.__run # Force the Thread to install our trace. threading.Thread.start(self)
def start(self)
Start the thread.
15.389828
9.939478
1.548354
sys.settrace(self.globaltrace) self.__run_backup() self.run = self.__run_backup
def __run(self)
Hacked run function, which installs the trace.
8.685531
6.280484
1.38294
if self.codepoints is None: return True for cp in self.codepoints: mismatch = False for i in range(len(cp)): if (cp[i] is not None) and (cp[i] != codepoint[i]): mismatch = True break if not mismatch: return True return False
def codepoint_included(self, codepoint)
Check if codepoint matches any of the defined codepoints.
2.559132
2.286082
1.11944
frame_info = inspect.getframeinfo(frame) cp = (frame_info[0], frame_info[2], frame_info[1]) if self.codepoint_included(cp): objects = muppy.get_objects() size = muppy.get_size(objects) if cp not in self.memories: self.memories[cp] = [0,0,0,0] self.memories[cp][0] = 1 self.memories[cp][1] = size self.memories[cp][2] = size else: self.memories[cp][0] += 1 if self.memories[cp][1] > size: self.memories[cp][1] = size if self.memories[cp][2] < size: self.memories[cp][2] = size
def profile(self, frame, event, arg): #PYCHOK arg requ. to match signature if (self.events == None) or (event in self.events)
Profiling method used to profile matching codepoints and events.
2.524901
2.262804
1.115828
''' Run the functions profiler and save the result. If timeout is greater than 0, the profile will automatically stop after timeout seconds ''' if noprofiler == True: print('ERROR: profiler and/or pstats library missing ! Please install it (probably package named python-profile) before running a profiling !') return False # This is the main function for profiling def _profile(): profile.run(mainfunction, output) print('=> RUNNING FUNCTIONS PROFILER\n\n'); sys.stdout.flush(); # Calibrate the profiler (only use this if the profiler produces some funny stuff, but calibration can also produce even more funny stuff with the latest cProfile of Python v2.7! So you should only enable calibration if necessary) if calibrate: print('Calibrating the profiler...'); sys.stdout.flush(); cval = calibrateprofile() print('Calibration found value : %s' % cval); sys.stdout.flush(); print('Initializing the profiler...'); sys.stdout.flush(); # Run in timeout mode (if the function cannot end by itself, this is the best mode: the function must end for the profile to be saved) if timeout > 0: pthread = KThread(target=_profile) # we open the function with the profiler, in a special killable thread (see below why) print('Will now run the profiling and terminate it in %s seconds. Results will be saved in %s' % (str(timeout), str(output))); sys.stdout.flush(); print('\nCountdown:'); sys.stdout.flush(); for i in range(0,5): print(str(5-i)) sys.stdout.flush() time.sleep(1) print('0\nStarting to profile...'); sys.stdout.flush(); pthread.start() # starting the thread time.sleep(float(timeout)) # after this amount of seconds, the thread gets killed and the profiler will end its job print('\n\nFinishing the profile and saving to the file %s' % str(output)); sys.stdout.flush(); pthread.kill() # we must end the main function in order for the profiler to output its results (if we didn't launch a thread and just closed the process, it would have produced no result) # Run in full length mode (we run the function until it ends) else: print("Running the profiler, please wait until the process terminates by itself (if you forcequit before, the profile won't be saved)") _profile() print('=> Functions Profile done !') return True
def runprofile(mainfunction, output, timeout = 0, calibrate=False)
Run the functions profiler and save the result. If timeout is greater than 0, the profile will automatically stop after timeout seconds
8.13504
7.385619
1.10147
''' Calibrate the profiler (necessary to have non negative and more exact values) ''' pr = profile.Profile() calib = [] crepeat = 10 for i in range(crepeat): calib.append(pr.calibrate(10000)) final = sum(calib) / crepeat profile.Profile.bias = final # Apply computed bias to all Profile instances created hereafter return final
def calibrateprofile()
Calibrate the profiler (necessary to have non negative and more exact values)
12.050161
6.990694
1.723743
''' Parse a profile log and save the formatted stats to a file ''' file = open(out, 'w') # opening the output file print('Opening the profile in %s...' % profilelog) p = pstats.Stats(profilelog, stream=file) # parsing the profile with pstats, and output everything to the file print('Generating the stats, please wait...') file.write("=== All stats:\n") p.strip_dirs().sort_stats(-1).print_stats() file.write("=== Cumulative time:\n") p.sort_stats('cumulative').print_stats(100) file.write("=== Time:\n") p.sort_stats('time').print_stats(100) file.write("=== Time + cumulative time:\n") p.sort_stats('time', 'cum').print_stats(.5, 'init') file.write("=== Callees:\n") p.print_callees() file.write("=== Callers:\n") p.print_callers() #p.print_callers(.5, 'init') #p.add('fooprof') file.close() print('Stats generated and saved to %s.' % out) print('Everything is done. Exiting')
def parseprofile(profilelog, out)
Parse a profile log and save the formatted stats to a file
3.743983
3.534414
1.059294
''' Browse interactively a profile log in console ''' print('Starting the pstats profile browser...\n') try: browser = ProfileBrowser(profilelog) print >> browser.stream, "Welcome to the profile statistics browser. Type help to get started." browser.cmdloop() print >> browser.stream, "Goodbye." except KeyboardInterrupt: pass
def browseprofile(profilelog)
Browse interactively a profile log in console
8.26009
6.356647
1.299441
''' Browse interactively a profile log in GUI using RunSnakeRun and SquareMap ''' from runsnakerun import runsnake # runsnakerun needs wxPython lib, if it's not available then we can pass if we don't want a GUI. RunSnakeRun is only used for GUI visualisation, not for profiling (and you can still use pstats for console browsing) app = runsnake.RunSnakeRunApp(0) app.OnInit(profilelog) #app.OnInit() app.MainLoop()
def browseprofilegui(profilelog)
Browse interactively a profile log in GUI using RunSnakeRun and SquareMap
16.810747
9.450219
1.778874
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188 ''' Returns a list of primes < n ''' sieve = [True] * (n/2) for i in _range(3,int(n**0.5)+1,2): if sieve[i/2]: sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1) return [2] + [2*i+1 for i in _range(1,n/2) if sieve[i]]
def rwh_primes1(n)
Returns a list of primes < n
1.797484
1.781829
1.008786
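The sieve above relies on Python 2 integer division; a floor-division variant that behaves identically and also runs under Python 3 (my adaptation, not part of the original corpus):

def rwh_primes1_floordiv(n):
    '''Returns a list of primes < n, using explicit floor division.'''
    sieve = [True] * (n // 2)
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [2 * i + 1 for i in range(1, n // 2) if sieve[i]]

assert rwh_primes1_floordiv(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]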
'''Convert a Galois Field's number into a nice polynomial''' if x <= 0: return "0" b = 1 # init to 2^0 = 1 c = [] # stores the degrees of each term of the polynomial i = 0 # counter for b = 2^i while x > 0: b = (1 << i) # generate a number power of 2: 2^0, 2^1, 2^2, ..., 2^i. Equivalent to b = 2^i if x & b : # then check if bit i of x is set, ie, whether the term 2^i contributes to x # If yes, then... c.append(i) # append this power (i is the exponent, ie, the degree of the term) x ^= b # and clear that bit (subtract 2^i from x) i = i+1 # increment to compute the next power of 2 return " + ".join(["x^%i" % y for y in c[::-1]])
def _to_binpoly(x)
Convert a Galois Field's number into a nice polynomial
5.590637
4.971738
1.124484
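A spot check of the conversion above, using the AES field polynomial 0x11b, whose bit pattern 100011011 has bits 8, 4, 3, 1 and 0 set (worked out by hand, assuming _to_binpoly is importable as shown):

assert _to_binpoly(0x11b) == "x^8 + x^4 + x^3 + x^1 + x^0"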
'''A slow multiply method. This method gives the same results as the other __mul__ method but without needing precomputed tables, thus it can be used to generate those tables. If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations). This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together. The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are: 1- carry-less addition and subtraction (XOR in GF(2^p)) 2- modular reduction (to avoid duplicate values in the field) using a prime polynomial ''' r = 0 a = int(a) b = int(b) while b: # while b is not 0 if b & 1: r = r ^ a if carryless else r + a # b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!). b = b >> 1 # equivalent to b // 2 a = a << 1 # equivalent to a*2 if prim > 0 and a & field_charac_full: a = a ^ prim # GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just subtract, but since the primitive number can be above 256 then we directly XOR). return GF2int(r)
def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True)
A slow multiply method. This method gives the same results as the other __mul__ method but without needing precomputed tables, thus it can be used to generate those tables. If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations). This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together. The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are: 1- carry-less addition and subtraction (XOR in GF(2^p)) 2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
10.548196
3.252624
3.242981
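A few spot checks of the routine above, worked out by hand from the algorithm (GF2int is assumed to compare equal to plain ints):

# x * (x + 1) = x^2 + x in GF(2^8)
assert multiply(2, 3) == 6
# x^8 reduced by the AES polynomial x^8 + x^4 + x^3 + x + 1 gives x^4 + x^3 + x + 1
assert multiply(0x80, 2, prim=0x11b) == 0x1b
# with prim=0 and carryless=False the routine degenerates to plain integer multiplication
assert multiply(7, 9, prim=0, carryless=False) == 63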
'''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table. This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.''' ### Define bitwise carry-less operations as inner functions ### def cl_mult(x,y): '''Bitwise carry-less multiplication on integers''' z = 0 i = 0 while (y>>i) > 0: if y & (1<<i): z ^= x<<i i += 1 return z def bit_length(n): '''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()''' bits = 0 while n >> bits: bits += 1 return bits def cl_div(dividend, divisor=None): '''Bitwise carry-less long division on integers and returns the remainder''' # Compute the position of the most significant bit for each integer dl1 = bit_length(dividend) dl2 = bit_length(divisor) # If the dividend is smaller than the divisor, just exit if dl1 < dl2: return dividend # Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor) for i in _range(dl1-dl2,-1,-1): # Check that the dividend is divisible (useless for the first iteration but important for the next ones) if dividend & (1 << i+dl2-1): # If divisible, then shift the divisor to align the most significant bits and XOR (carry-less subtraction) dividend ^= divisor << i return dividend ### Main GF multiplication routine ### # Multiply the gf numbers result = cl_mult(x,y) # Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds if prim > 0: result = cl_div(result, prim) return result
def multiply_slow(x, y, prim=0x11b)
Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table. This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.
6.108754
4.21417
1.449575
source = source.strip() assert source.startswith( '{' ) assert source.endswith( '}' ) source = source[1:-1] result = {} for match in attr.finditer( source ): key = match.group('key') if match.group( 'list' ) is not None: value = [ int(x) for x in match.group( 'list' ).strip().replace(',',' ').split() ] elif match.group( 'int' ) is not None: value = int( match.group( 'int' )) elif match.group( 'string' ) is not None: def deescape( match ): return unichr( int( match.group(0)[2:], 16 )) value = match.group('string').decode( 'utf-8' ) value = escape.sub( deescape, value, ) value = simple_escape.sub( lambda x: x.group(1), value, ) else: raise RuntimeError( "Matched something we don't know how to process:", match.groupdict() ) result[key] = value return result
def loads( source )
Load a json structure produced by meliae from source. Supports only the structures required to load meliae memory dumps
2.967895
3.029755
0.979583
'''Encode a given string or list of values (between 0 and gf2_charac) with reed-solomon encoding. Returns a list of values (or a string if return_string is true) with the k message bytes and n-k parity bytes at the end. If a message is < k bytes long, it is assumed to be padded at the front with null bytes. The sequence returned is always n bytes long. If poly is not False, returns the encoded Polynomial object instead of the polynomial translated back to a string (useful for debugging) ''' n = self.n if not k: k = self.k if len(message)>k: raise ValueError("Message length is max %d. Message was %d" % (k, len(message))) # If we were given a string, convert to a list (important to support fields above 2^8) if isinstance(message, _str): message = [ord(x) for x in message] # Encode message as a polynomial: m = Polynomial([GF2int(x) for x in message]) # Shift polynomial up by n-k by multiplying by x^(n-k) to reserve the first n-k coefficients for the ecc. This effectively pad the message with \0 bytes for the lower coefficients (where the ecc will be placed). mprime = m * Polynomial([GF2int(1)] + [GF2int(0)]*(n-k)) # mprime = q*g + b for some q # so let's find b, the code word (ecc block): b = mprime % self.g[k] # Subtract out b, so now c = q*g, which is a way of xoring mprime and code word b, which is a way of just saying that we append the polynomial ecc code to the original message (we replace the padded 0 bytes of mprime with the code word) c = mprime - b # Since c is a multiple of g, it has (at least) n-k roots: α^1 through # α^(n-k) if not poly: # Turn the polynomial c back into a string ret = self._list_rjust(c.coefficients, n, 0) # rjust is useful for the nostrip feature if return_string and self.gf2_charac < 256: ret = self._list2str(ret) else: ret = c return ret
def encode(self, message, poly=False, k=None, return_string=True)
Encode a given string or list of values (between 0 and gf2_charac) with reed-solomon encoding. Returns a list of values (or a string if return_string is true) with the k message bytes and n-k parity bytes at the end. If a message is < k bytes long, it is assumed to be padded at the front with null bytes. The sequence returned is always n bytes long. If poly is not False, returns the encoded Polynomial object instead of the polynomial translated back to a string (useful for debugging)
9.8721
5.759612
1.714022
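Restating the algebra implemented by encode above: with m(x) the message polynomial and g(x) the generator polynomial for the chosen k, the encoder computes b(x) = (m(x) * x^(n-k)) mod g(x) and returns c(x) = m(x) * x^(n-k) - b(x). By construction c(x) mod g(x) = 0, which is exactly the property that check and check_fast below rely on.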
'''Verifies the codeword is valid by testing that the codeword (message+ecc), seen as a polynomial, is divisible by the generator polynomial g returns True/False ''' n = self.n if not k: k = self.k #h = self.h[k] g = self.g[k] # If we were given a string, convert to a list (important to support fields above 2^8) if isinstance(r, _str): r = [ord(x) for x in r] # Turn r into a polynomial c = Polynomial([GF2int(x) for x in r]) # This works too, but takes longer. Both checks are just as valid. #return (c*h)%gtimesh == Polynomial(x0=0) # Since all codewords are multiples of g, checking that g divides the codeword c (ie, c % g == 0) # suffices for validating a codeword. return c % g == Polynomial(x0=0)
def check(self, r, k=None)
Verifies the codeword is valid by testing that the codeword (message+ecc), seen as a polynomial, is divisible by the generator polynomial g returns True/False
10.54581
6.485884
1.625963
'''Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered with, or after decoding to check that the message was fully recovered. returns True/False ''' n = self.n if not k: k = self.k #h = self.h[k] g = self.g[k] # If we were given a string, convert to a list (important to support fields above 2^8) if isinstance(r, _str): r = [ord(x) for x in r] # Turn r into a polynomial r = Polynomial([GF2int(x) for x in r]) # Compute the syndromes: sz = self._syndromes(r, k=k) # Checking that the syndromes are all 0 is sufficient to check that there are no errors left in the decoded message #return all(int(x) == 0 for x in sz) return sz.coefficients.count(GF2int(0)) == len(sz)
def check_fast(self, r, k=None)
Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered with, or after decoding to check that the message was fully recovered. returns True/False
7.988797
4.375891
1.825639
'''Left strip the specified value''' for i in _range(len(L)): if L[i] != val: return L[i:] return [] # every element equals val, so everything was stripped
def _list_lstrip(self, L, val=0)
Left strip the specified value
5.340398
5.366714
0.995096
'''Left pad with the specified value to obtain a list of the specified width (length)''' length = max(0, width - len(L)) return [fillchar]*length + L
def _list_rjust(self, L, width, fillchar=0)
Left pad with the specified value to obtain a list of the specified width (length)
8.58801
3.808771
2.254798
'''Given the received codeword r in the form of a Polynomial object, computes the syndromes and returns the syndrome polynomial. Mathematically, it's essentially equivalent to a Fourier Transform (Chien search being the inverse). ''' n = self.n if not k: k = self.k # Note the + [GF2int(0)] : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computation depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions). # This is not necessary as anyway syndromes are defined such that there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computation to start from 0 instead of 1. return Polynomial( [r.evaluate( GF2int(self.generator)**(l+self.fcr) ) for l in _range(n-k-1, -1, -1)] + [GF2int(0)], keep_zero=True )
def _syndromes(self, r, k=None)
Given the received codeword r in the form of a Polynomial object, computes the syndromes and returns the syndrome polynomial. Mathematically, it's essentially equivalent to a Fourier Transform (Chien search being the inverse).
18.088335
10.96209
1.650081
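In the same notation, the syndromes computed by _syndromes above are S_l = r(alpha^(l + fcr)) for l = 0 .. n-k-1, where alpha is self.generator and r(x) is the received codeword polynomial; the returned Polynomial lists S_(n-k-1) down to S_0, followed by the extra constant 0 coefficient discussed in the comment.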
'''Compute the erasures locator polynomial from the erasures positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument).''' # See: http://ocw.usu.edu/Electrical_and_Computer_Engineering/Error_Control_Coding/lecture7.pdf and Blahut, Richard E. "Transform techniques for error control codes." IBM Journal of Research and development 23.3 (1979): 299-315. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.600&rep=rep1&type=pdf and also a MatLab implementation here: http://www.mathworks.com/matlabcentral/fileexchange/23567-reed-solomon-errors-and-erasures-decoder/content//RS_E_E_DEC.m erasures_loc = Polynomial([GF2int(1)]) # just to init because we will multiply, so it must be 1 so that the multiplication starts correctly without nulling any term # erasures_loc is very simple to compute: erasures_loc = prod(1 - x*alpha[j]**i) for i in erasures_pos and where alpha is the alpha chosen to evaluate polynomials (here in this library it's gf(3)). To generate c*x where c is a constant, we simply generate a Polynomial([c, 0]) where 0 is the constant and c is positioned to be the coefficient for x^1. See https://en.wikipedia.org/wiki/Forney_algorithm#Erasures for i in erasures_pos: erasures_loc = erasures_loc * (Polynomial([GF2int(1)]) - Polynomial([GF2int(self.generator)**i, 0])) return erasures_loc
def _find_erasures_locator(self, erasures_pos)
Compute the erasures locator polynomial from the erasures positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument).
12.124003
5.456416
2.221972
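Likewise, the erasure locator built by _find_erasures_locator is Lambda(x) = product over i in erasures_pos of (1 - alpha^i * x); each factor comes from the Polynomial([GF2int(self.generator)**i, 0]) term, with alpha^i placed as the x^1 coefficient and 0 as the constant.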