code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
route = a[0] if a and isinstance(a[0], Route) else Route(*a, **ka)
self.routes.append(route)
if route.name:
    self.named[route.name] = route.format_str()
if route.static:
    self.static[route.route] = route.target
    return
gpatt = route.group_re()
fpatt = route.flat_re()
try:
    gregexp = re.compile('^(%s)$' % gpatt) if '(?P' in gpatt else None
    combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, fpatt)
    self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
    self.dynamic[-1][1].append((route.target, gregexp))
except (AssertionError, IndexError), e:  # AssertionError: Too many groups
    self.dynamic.append((re.compile('(^%s$)' % fpatt), [(route.target, gregexp)]))
except re.error, e:
    raise RouteSyntaxError("Could not add Route: %s (%s)" % (route, e))
def add(self, *a, **ka)
Adds a route->target pair or a Route object to the Router. See Route() for details.
4.172122
4.011259
1.040103
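A minimal usage sketch for add(); the rule and handler below are illustrative, and Route(rule, target) is assumed from the forwarding of *a/**ka shown above:

router = Router()

def hello(name):
    return 'Hello %s!' % name

router.add('/hello/:name', hello)        # forwarded to Route(*a, **ka)
router.add(Route('/static/x', hello))    # a prebuilt Route is appended as-is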
''' Mount a Bottle application to a specific URL prefix '''
if not isinstance(app, Bottle):
    raise TypeError('Only Bottle instances are supported for now.')
script_path = '/'.join(filter(None, script_path.split('/')))
path_depth = script_path.count('/') + 1
if not script_path:
    raise TypeError('Empty script_path. Perhaps you want a merge()?')
for other in self.mounts:
    if other.startswith(script_path):
        raise TypeError('Conflict with existing mount: %s' % other)

@self.route('/%s/:#.*#' % script_path, method="ANY")
def mountpoint():
    request.path_shift(path_depth)
    return app.handle(request.path, request.method)

self.mounts[script_path] = app
def mount(self, app, script_path)
Mount a Bottle application to a specific URL prefix
4.567283
4.288218
1.065077
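A usage sketch for mount() with the old Bottle API shown here; the sub-route and prefix are illustrative:

root = Bottle()
admin = Bottle()

@admin.route('/status')
def status():
    return 'admin ok'

root.mount(admin, '/admin')   # GET /admin/status is path-shifted and handled by the admin app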
if not self.serve:
    return HTTPError(503, "Server stopped")
handler, args = self.match_url(url, method)
if not handler:
    return HTTPError(404, "Not found:" + url)
try:
    return handler(**args)
except HTTPResponse, e:
    return e
except Exception, e:
    if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError)) \
       or not self.catchall:
        raise
    return HTTPError(500, 'Unhandled exception', e, format_exc(10))
def handle(self, url, method)
Execute the handler bound to the specified url and method and return its output. If catchall is true, exceptions are caught and returned as HTTPError(500) objects.
3.92629
3.501444
1.121334
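A one-line usage sketch for handle(); the URL and method are illustrative, and app is assumed to be a Bottle instance with routes installed:

body = app.handle('/hello/world', 'GET')   # handler output, or an HTTPError (404/500/503) instance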
# Filtered types (recursive, because they may return anything) for testtype, filterfunc in self.castfilter: if isinstance(out, testtype): return self._cast(filterfunc(out), request, response) # Empty output is done here if not out: response.headers['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, list) and isinstance(out[0], (StringType, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, StringType): response.headers['Content-Length'] = str(len(out)) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) if isinstance(out, HTTPError): out.apply(response) return self._cast(self.error_handler.get(out.status, repr)(out), request, response) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.output, request, response) # Cast Files into iterables if hasattr(out, 'read') and 'wsgi.file_wrapper' in request.environ: out = request.environ.get('wsgi.file_wrapper', lambda x, y: iter(lambda: x.read(y), ''))(out, 1024*64) # Handle Iterables. We peek into them to detect their inner type. try: out = iter(out) first = out.next() while not first: first = out.next() except StopIteration: return self._cast('', request, response) except HTTPResponse, e: first = e except Exception, e: first = HTTPError(500, 'Unhandled exception', e, format_exc(10)) if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\ or not self.catchall: raise # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): return self._cast(first, request, response) if isinstance(first, StringType): return itertools.chain([first], out) if isinstance(first, unicode): return itertools.imap(lambda x: x.encode(response.charset), itertools.chain([first], out)) return self._cast(HTTPError(500, 'Unsupported response type: %s'\ % type(first)), request, response)
def _cast(self, out, request, response, peek=None)
Try to convert the parameter into something WSGI compatible and set correct HTTP headers where possible. Supported: False, str, unicode, dict, HTTPResponse, HTTPError, file-like objects, iterables of strings and iterables of unicode strings
3.914973
3.78968
1.033062
''' :class:`HeaderDict` filled with request headers. HeaderDict keys are case insensitive str.title()d '''
if self._header is None:
    self._header = HeaderDict()
    for key, value in self.environ.iteritems():
        if key.startswith('HTTP_'):
            key = key[5:].replace('_', '-').title()
            self._header[key] = value
return self._header
def header(self)
:class:`HeaderDict` filled with request headers. HeaderDict keys are case insensitive and normalized with str.title()
4.96464
2.273993
2.183226
if self._GET is None:
    data = parse_qs(self.query_string, keep_blank_values=True)
    self._GET = MultiDict()
    for key, values in data.iteritems():
        for value in values:
            self._GET[key] = value
return self._GET
def GET(self)
The QUERY_STRING parsed into a MultiDict. Keys and values are strings. Multiple values per key are possible. See MultiDict for details.
2.655649
2.422725
1.096141
if self._COOKIES is None:
    raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE', ''))
    self._COOKIES = {}
    for cookie in raw_dict.itervalues():
        self._COOKIES[cookie.key] = cookie.value
return self._COOKIES
def COOKIES(self)
Cookie information parsed into a dictionary. Secure cookies are NOT decoded automatically. See Request.get_cookie() for details.
2.716055
2.57792
1.053584
if not isinstance(value, basestring):
    sec = self.app.config['securecookie.key']
    value = cookie_encode(value, sec).decode('ascii')  # 2to3 hack
self.COOKIES[key] = value
for k, v in kargs.iteritems():
    self.COOKIES[key][k.replace('_', '-')] = v
def set_cookie(self, key, value, **kargs)
Add a new cookie with various options. If the cookie value is not a string, a secure cookie is created. Possible options are: expires, path, comment, domain, max_age, secure, version, httponly. See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
5.377497
5.424513
0.991333
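A short usage sketch for this old Bottle response API; the cookie names, values and secret key below are illustrative only:

# plain string value: stored as-is, with morsel options mapped from the keyword arguments
response.set_cookie('account', 'alice', path='/', max_age=3600)

# non-string value: encoded and signed (a "secure cookie"), which requires a configured secret
app.config['securecookie.key'] = 'change-me'
response.set_cookie('session', {'uid': 42}, path='/')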
if not replace_str:
    replace_str = "\x00"
try:
    with open(path, "r+b") as fh:
        if pos < 0:  # if negative, we calculate the position backward from the end of file
            fsize = os.fstat(fh.fileno()).st_size
            pos = fsize + pos
        fh.seek(pos)
        fh.write(replace_str)
except IOError:
    return False
finally:
    try:
        fh.close()
    except Exception:
        pass
return True
def tamper_file_at(path, pos=0, replace_str=None)
Tamper a file at the given position using the given replacement string
2.490473
2.460746
1.012081
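A usage sketch for tamper_file_at(), with a hypothetical file name; positions are byte offsets and a negative pos counts back from the end of the file:

tamper_file_at('image.dat', pos=128)                      # overwrite one byte at offset 128 with "\x00"
tamper_file_at('image.dat', pos=-4, replace_str='XXXX')   # overwrite the last 4 bytes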
if header and header > 0: blocksize = header tamper_count = 0 # total number of characters tampered in the file total_size = 0 # total buffer size, NOT necessarily the total file size (depends if you set header or not) with open(filepath, "r+b") as fh: # 'r+' allows to read AND overwrite characters. Else any other option won't allow both ('a+' read and append, 'w+' erases the file first then allow to read and write), and 'b' is just for binary because we can open any filetype. if proba >= 1: proba = 1.0/os.fstat(fh.fileno()).st_size * proba # normalizing probability if it's an integer (ie: the number of characters to flip on average) buf = fh.read(blocksize) # We process blocks by blocks because it's a lot faster (IO is still the slowest operation in any computing system) while len(buf) > 0: total_size += len(buf) if not block_proba or (random.random() < block_proba): # If block tampering is enabled, process only if this block is selected by probability pos2tamper = [] burst_remain = 0 # if burst is enabled and corruption probability is triggered, then we will here store the remaining number of characters to corrupt (the length is uniformly sampled over the range specified in arguments) # Create the list of bits to tamper (it's a lot more efficient to precompute the list of characters to corrupt, and then modify in the file the characters all at once) for i in xrange(len(buf)): if burst_remain > 0 or (random.random() < proba): # Corruption probability: corrupt only if below the bit-flip proba pos2tamper.append(i) # keep this character's position in the to-be-corrupted list if burst_remain > 0: # if we're already in a burst, we minus one and continue onto the next character burst_remain -= 1 elif burst_length: # else we're not in a burst, we create one (triggered by corruption probability: as soon as one character triggers the corruption probability, then we do a burst) burst_remain = random.randint(burst_length[0], burst_length[1]) - 1 # if burst is enabled, then we randomly (uniformly) pick a random length for the burst between the range specified, and since we already tampered one character, we minus 1 # If there's any character to tamper in the list, we tamper the string if pos2tamper: tamper_count = tamper_count + len(pos2tamper) #print("Before: %s" % buf) buf = bytearray(buf) # Strings in Python are immutable, thus we need to convert to a bytearray for pos in pos2tamper: if mode == 'e' or mode == 'erasure': # Erase the character (set a null byte) buf[pos] = 0 elif mode == 'n' or mode == 'noise': # Noising the character (set a random ASCII character) buf[pos] = random.randint(0,255) #print("After: %s" % buf) # Overwriting the string into the file prevpos = fh.tell() # need to store and place back the seek cursor because after the write, if it's the end of the file, the next read may be buggy (getting characters that are not part of the file) fh.seek(fh.tell()-len(buf)) # Move the cursor at the beginning of the string we just read fh.write(buf) # Overwrite it fh.seek(prevpos) # Restore the previous position after the string # If we only tamper the header, we stop here by setting the buffer to an empty string if header and header > 0: buf = '' # Else we continue to the next data block else: # Load the next characters from file buf = fh.read(blocksize) return [tamper_count, total_size]
def tamper_file(filepath, mode='e', proba=0.03, block_proba=None, blocksize=65535, burst_length=None, header=None)
Randomly tamper a file's content
6.117014
6.106776
1.001677
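A usage sketch for tamper_file() under the semantics shown above (file name illustrative): a proba below 1.0 is a per-byte corruption probability, a proba >= 1 is normalized into an average number of corrupted bytes, and header restricts tampering to the first block:

count, size = tamper_file('disk_image.bin', mode='n', proba=0.03)            # noise: ~3% of bytes set to random values
count, size = tamper_file('disk_image.bin', mode='e', proba=5, header=1024)  # erasure: ~5 null bytes in the first 1 KB only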
silent = kwargs.get('silent', False)
if 'silent' in kwargs:
    del kwargs['silent']
filescount = 0
for _ in tqdm(recwalk(inputpath), desc='Precomputing', disable=silent):
    filescount += 1
files_tampered = 0
tamper_count = 0
total_size = 0
for dirname, filepath in tqdm(recwalk(inputpath), total=filescount, leave=True, desc='Tamper file n.', disable=silent):
    tcount, tsize = tamper_file(os.path.join(dirname, filepath), *args, **kwargs)
    if tcount > 0:
        tamper_count += tcount
        files_tampered += 1
    total_size += tsize
return [files_tampered, filescount, tamper_count, total_size]
def tamper_dir(inputpath, *args, **kwargs)
Randomly tamper the content of the files in a directory tree, recursively
3.119215
3.069766
1.016109
stack_trace = stack()
try:
    self.trace = []
    for frm in stack_trace[5:]:  # eliminate our own overhead
        self.trace.insert(0, frm[1:])
finally:
    del stack_trace
def _save_trace(self)
Save current stack trace as formatted string.
9.07001
7.798491
1.163047
obj = self.ref()
self.snapshots.append(
    (ts, sizer.asized(obj, detail=self._resolution_level))
)
if obj is not None:
    self.repr = safe_repr(obj, clip=128)
def track_size(self, ts, sizer)
Store timestamp and current size for later evaluation. The 'sizer' is a stateful sizing facility that excludes other tracked objects.
12.18241
11.322797
1.075919
size = 0
for (t, s) in self.snapshots:
    if t == timestamp:
        size = s.size
return size
def get_size_at_time(self, timestamp)
Get the size of the object at a specific time (snapshot). If the object was not alive/sized at that instant, return 0.
4.418941
3.361033
1.314757
self.stop = False
while not self.stop:
    self.tracker.create_snapshot()
    sleep(self.interval)
def run(self)
Loop until a stop signal is set.
7.468956
5.673672
1.316424
if self.system_total.available:
    return self.system_total.vsz
elif self.asizeof_total:  # pragma: no cover
    return self.asizeof_total
else:  # pragma: no cover
    return self.tracked_total
def total(self)
Return the total (virtual) size of the process in bytes. If process information is not available, get the best number available, even if it is a poor approximation of reality.
7.17117
5.725076
1.25259
if not self.desc:
    return "%.3fs" % self.timestamp
return "%s (%.3fs)" % (self.desc, self.timestamp)
def label(self)
Return timestamped label for this snapshot, or a raw timestamp.
4.802534
3.406073
1.409992
self.track_object(_self_,
                  name=_observer_.name,
                  resolution_level=_observer_.detail,
                  keep=_observer_.keep,
                  trace=_observer_.trace)
_observer_.init(_self_, *args, **kwds)
def _tracker(self, _observer_, _self_, *args, **kwds)
Injected constructor for tracked classes. Call the actual constructor of the object and track the object. Attach to the object before calling the constructor to track the object with the parameters of the most specialized class.
7.487408
6.871406
1.089647
try: constructor = cls.__init__ except AttributeError: def constructor(self, *_args, **_kwargs): pass # Possible name clash between keyword arguments of the tracked class' # constructor and the curried arguments of the injected constructor. # Therefore, the additional argument has a 'magic' name to make it less # likely that an argument name clash occurs. self._observers[cls] = _ClassObserver(constructor, name, resolution_level, keep, trace) cls.__init__ = instancemethod( lambda *args, **kwds: func(self._observers[cls], *args, **kwds), None, cls )
def _inject_constructor(self, cls, func, name, resolution_level, keep, trace)
Modify methods in place, after recipe 15.7 in the Python Cookbook by Ken Seehof. The original constructors may be restored later.
6.210684
6.370925
0.974848
self._observers[cls].modify(name, detail, keep, trace)
def _track_modify(self, cls, name, detail, keep, trace)
Modify settings of a tracked class
5.953964
5.573164
1.068327
cls.__init__ = self._observers[cls].init
del self._observers[cls]
def _restore_constructor(self, cls)
Restore the original constructor, lose track of class.
7.164706
5.091059
1.407312
tobj = self.objects[id(instance)]
tobj.set_resolution_level(resolution_level)
def track_change(self, instance, resolution_level=0)
Change tracking options for the already tracked object 'instance'. If instance is not tracked, a KeyError will be raised.
5.934233
5.225284
1.135677
# Check if object is already tracked. This happens if track_object is # called multiple times for the same object or if an object inherits # from multiple tracked classes. In the latter case, the most # specialized class wins. To detect id recycling, the weak reference # is checked. If it is 'None' a tracked object is dead and another one # takes the same 'id'. if id(instance) in self.objects and \ self.objects[id(instance)].ref() is not None: return tobj = TrackedObject(instance, resolution_level=resolution_level, trace=trace) if name is None: name = instance.__class__.__name__ if not name in self.index: self.index[name] = [] self.index[name].append(tobj) self.objects[id(instance)] = tobj if keep: self._keepalive.append(instance)
def track_object(self, instance, name=None, resolution_level=0, keep=False, trace=False)
Track object 'instance' and sample size and lifetime information. Not all objects can be tracked; trackable objects are class instances and other objects that can be weakly referenced. When an object cannot be tracked, a `TypeError` is raised. :param resolution_level: The recursion depth up to which referents are sized individually. Resolution level 0 (default) treats the object as an opaque entity, 1 sizes all direct referents individually, 2 also sizes the referents of the referents and so forth. :param keep: Prevent the object's deletion by keeping a (strong) reference to the object.
4.61617
4.697599
0.982666
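A minimal sketch of object tracking, assuming a reasonably recent pympler layout (pympler.classtracker.ClassTracker) and a user-defined class, since plain dicts cannot be weakly referenced; the names Cache and 'cache' are illustrative:

from pympler.classtracker import ClassTracker

class Cache(object):
    def __init__(self):
        self.entries = {}

tracker = ClassTracker()
cache = Cache()
tracker.track_object(cache, name='cache', resolution_level=2, keep=True)
tracker.create_snapshot('after warm-up')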
if not isclass(cls):
    raise TypeError("only class objects can be tracked")
if name is None:
    name = cls.__module__ + '.' + cls.__name__
if self._is_tracked(cls):
    self._track_modify(cls, name, resolution_level, keep, trace)
else:
    self._inject_constructor(cls, self._tracker, name, resolution_level, keep, trace)
def track_class(self, cls, name=None, resolution_level=0, keep=False, trace=False)
Track all objects of the class `cls`. Objects of that type that already exist are *not* tracked. If `track_class` is called for a class already tracked, the tracking parameters are modified. Instantiation traces can be generated by setting `trace` to True. A constructor is injected to begin instance tracking on creation of the object. The constructor calls `track_object` internally. :param cls: class to be tracked, may be an old-style or a new-style class :param name: reference the class by a name, default is the concatenation of module and class name :param resolution_level: The recursion depth up to which referents are sized individually. Resolution level 0 (default) treats the object as an opaque entity, 1 sizes all direct referents individually, 2 also sizes the referents of the referents and so forth. :param keep: Prevent the object's deletion by keeping a (strong) reference to the object. :param trace: Save instantiation stack trace for each instance
3.229276
3.283269
0.983555
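A class-level sketch building on the same hypothetical tracker instance; Document is a stand-in for any user class, and print_summary() is one way to inspect the collected snapshots:

class Document(object):
    def __init__(self, payload):
        self.payload = payload

tracker.track_class(Document, resolution_level=1, trace=True)
tracker.create_snapshot('before load')
docs = [Document('x' * 1000) for _ in range(100)]
tracker.create_snapshot('after load')
tracker.stats.print_summary()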
classes = list(self._observers.keys())
for cls in classes:
    self.detach_class(cls)
def detach_all_classes(self)
Detach from all tracked classes.
4.455241
3.87437
1.149927
self.detach_all_classes()
self.objects.clear()
self.index.clear()
self._keepalive[:] = []
def detach_all(self)
Detach from all tracked classes and objects. Restore the original constructors and cleanse the tracking lists.
9.968792
7.350568
1.356193
if not self._periodic_thread:
    self._periodic_thread = PeriodicThread(self, interval, name='BackgroundMonitor')
    self._periodic_thread.setDaemon(True)
    self._periodic_thread.start()
else:
    self._periodic_thread.interval = interval
def start_periodic_snapshots(self, interval=1.0)
Start a thread which takes snapshots periodically. The `interval` specifies the time in seconds the thread waits between taking snapshots. The thread is started as a daemon allowing the program to exit. If periodic snapshots are already active, the interval is updated.
2.860163
2.790048
1.02513
if self._periodic_thread and self._periodic_thread.isAlive():
    self._periodic_thread.stop = True
    self._periodic_thread.join()
    self._periodic_thread = None
def stop_periodic_snapshots(self)
Post a stop signal to the thread that takes the periodic snapshots. The function waits for the thread to terminate which can take some time depending on the configured interval.
2.510918
2.347641
1.069549
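Periodic snapshots with the same hypothetical tracker; run_workload() is a placeholder for the code being measured:

tracker.start_periodic_snapshots(interval=0.5)   # daemon thread, one snapshot every 0.5 s
run_workload()
tracker.stop_periodic_snapshots()                # sets the stop flag and joins the thread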
try: # TODO: It is not clear what happens when memory is allocated or # released while this function is executed but it will likely lead # to inconsistencies. Either pause all other threads or don't size # individual objects in asynchronous mode. self.snapshot_lock.acquire() timestamp = _get_time() sizer = asizeof.Asizer() objs = [tobj.ref() for tobj in list(self.objects.values())] sizer.exclude_refs(*objs) # The objects need to be sized in a deterministic order. Sort the # objects by its creation date which should at least work for non-parallel # execution. The "proper" fix would be to handle shared data separately. tracked_objects = list(self.objects.values()) tracked_objects.sort(key=lambda x: x.birth) for tobj in tracked_objects: tobj.track_size(timestamp, sizer) snapshot = Snapshot() snapshot.timestamp = timestamp snapshot.tracked_total = sizer.total if compute_total: snapshot.asizeof_total = asizeof.asizeof(all=True, code=True) snapshot.system_total = pympler.process.ProcessMemoryInfo() snapshot.desc = str(description) # Compute overhead of all structures, use sizer to exclude tracked objects(!) snapshot.overhead = 0 if snapshot.tracked_total: snapshot.overhead = sizer.asizeof(self) if snapshot.asizeof_total: snapshot.asizeof_total -= snapshot.overhead self.snapshots.append(snapshot) finally: self.snapshot_lock.release()
def create_snapshot(self, description='', compute_total=False)
Collect current per-instance statistics and save the total amount of memory associated with the Python process. If `compute_total` is `True`, the total consumption of all objects known to *asizeof* is computed. The latter might be very slow if many objects are mapped into memory at the time the snapshot is taken. Therefore, `compute_total` is set to `False` by default. The overhead of the `ClassTracker` structure is also computed. Snapshots can be taken asynchronously. The function is protected with a lock to prevent race conditions.
6.464937
5.933506
1.089564
'''_actions which are positional or which have the `required` flag set'''
return not action.option_strings and not isinstance(action, _SubParsersAction) or action.required == True
def is_required(action)
_actions which are positional or which have the `required` flag set
21.206989
6.904834
3.071325
'''From a raw ecc entry (a string), extract the metadata fields (filename, filesize, ecc for both), and the rest being blocks of hash and ecc per blocks of the original file's header''' entry = entry.lstrip(field_delim) # if there was some slight adjustment error (example: the last ecc block of the last file was the field_delim, then we will start with a field_delim, and thus we need to remove the trailing field_delim which is useless and will make the field detection buggy). This is not really a big problem for the previous file's ecc block: the missing ecc characters (which were mistaken for a field_delim), will just be missing (so we will lose a bit of resiliency for the last block of the previous file, but that's not a huge issue, the correction can still rely on the other characters). # Find metadata fields delimiters positions # TODO: automate this part, just give in argument the number of field_delim to find, and the func will find the x field_delims (the number needs to be specified in argument because the field_delim can maybe be found wrongly inside the ecc stream, which we don't want) first = entry.find(field_delim) second = entry.find(field_delim, first+len(field_delim)) third = entry.find(field_delim, second+len(field_delim)) fourth = entry.find(field_delim, third+len(field_delim)) # Note: we do not try to find all the field delimiters because we optimize here: we just walk the string to find the exact number of field_delim we are looking for, and after we stop, no need to walk through the whole string. # Extract the content of the fields # Metadata fields relfilepath = entry[:first] filesize = entry[first+len(field_delim):second] relfilepath_ecc = entry[second+len(field_delim):third] filesize_ecc = entry[third+len(field_delim):fourth] # Ecc stream field (aka ecc blocks) ecc_field = entry[fourth+len(field_delim):] # Try to convert to an int, an error may happen try: filesize = int(filesize) except Exception, e: print("Exception when trying to detect the filesize in ecc field (it may be corrupted), skipping: ") print(e) #filesize = 0 # avoid setting to 0, we keep as an int so that we can try to fix using intra-ecc # entries = [ {"message":, "ecc":, "hash":}, etc.] #print(entry) #print(len(entry)) return {"relfilepath": relfilepath, "relfilepath_ecc": relfilepath_ecc, "filesize": filesize, "filesize_ecc": filesize_ecc, "ecc_field": ecc_field}
def entry_fields(entry, field_delim="\xFF")
From a raw ecc entry (a string), extract the metadata fields (filename, filesize, and the ecc for both), with the rest being blocks of hash and ecc for each block of the original file's header
8.128524
6.381518
1.27376
'''From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with the relative hash and ecc for easy processing later.''' # Extract the header from the file if fileheader is None: with open(filepath, 'rb') as file: # filepath is the absolute path to the original file (the one with maybe corruptions, NOT the output repaired file!) # Compute the size of the buffer to read: either header_size if possible, but if the file is smaller than that then we will read the whole file. if entry_fields["filesize"] > 0 and entry_fields["filesize"] < header_size: fileheader = file.read(entry_fields["filesize"]) else: fileheader = file.read(header_size) # Cut the header and the ecc entry into blocks, and then assemble them so that we can easily process block by block entry_asm = [] for i, j in itertools.izip(xrange(0, len(fileheader), ecc_params["message_size"]), xrange(0, len(entry_fields["ecc_field"]), ecc_params["hash_size"] + ecc_params["ecc_size"])): # Extract each fields from each block mes = fileheader[i:i+ecc_params["message_size"]] hash = entry_fields["ecc_field"][j:j+ecc_params["hash_size"]] ecc = entry_fields["ecc_field"][j+ecc_params["hash_size"]:j+ecc_params["hash_size"]+ecc_params["ecc_size"]] entry_asm.append({"message": mes, "hash": hash, "ecc": ecc}) # Return a list of fields for each block return entry_asm
def entry_assemble(entry_fields, ecc_params, header_size, filepath, fileheader=None)
From an entry with its parameters (filename, filesize), assemble a list of each block from the original file along with its corresponding hash and ecc, for easy processing later.
4.379595
3.336082
1.312796
'''Split a string in blocks given max_block_size and compute the hash and ecc for each block, and then return a nice list with both for easy processing.''' result = [] # If required parameters were not provided, we compute them if not message_size: ecc_params = compute_ecc_params(max_block_size, rate, hasher) message_size = ecc_params["message_size"] # Split the buffer string in blocks (necessary for Reed-Solomon encoding because it's limited to 255 characters max) for i in xrange(0, len(buf), message_size): # Compute the message block mes = buf[i:i+message_size] # Compute the ecc ecc = ecc_manager.encode(mes) # Compute the hash hash = hasher.hash(mes) #crc = zlib.crc32(mes) # DEPRECATED: CRC is not resilient enough #print("mes %i (%i) - ecc %i (%i) - hash %i (%i)" % (len(mes), message_size, len(ecc), ecc_params["ecc_size"], len(hash), ecc_params["hash_size"])) # DEBUGLINE # Return the result (either in string for easy writing into a file, or in a list for easy post-processing) if as_string: result.append("%s%s" % (str(hash),str(ecc))) else: result.append([hash, ecc]) return result
def compute_ecc_hash(ecc_manager, hasher, buf, max_block_size, rate, message_size=None, as_string=False)
Split a string into blocks given max_block_size, compute the hash and ecc for each block, and return a list with both for easy processing.
4.212772
3.29984
1.276659
fentry_fields = {"ecc_field": ecc} field_correct = [] # will store each block of the corrected (or already correct) filepath fcorrupted = False # check if field was corrupted fcorrected = True # check if field was corrected (if it was corrupted) errmsg = '' # Decode each block of the filepath for e in entry_assemble(fentry_fields, ecc_params_intra, len(field), '', field): # Check if this block of the filepath is OK, if yes then we just copy it over if ecc_manager_intra.check(e["message"], e["ecc"]): field_correct.append(e["message"]) else: # Else this block is corrupted, we will try to fix it using the ecc fcorrupted = True # Repair the message block and the ecc try: repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures) except (ReedSolomonError, RSCodecError), exc: # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files. repaired_block = None repaired_ecc = None errmsg += "- Error: metadata field at offset %i: %s\n" % (entry_pos[0], exc) # Check if the block was successfully repaired: if yes then we copy the repaired block... if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc): field_correct.append(repaired_block) else: # ... else it failed, then we copy the original corrupted block and report an error later field_correct.append(e["message"]) fcorrected = False # Join all the blocks into one string to build the final filepath if isinstance(field_correct[0], bytearray): field_correct = [str(x) for x in field_correct] # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str field = ''.join(field_correct) # Report errors return (field, fcorrupted, fcorrected, errmsg)
def ecc_correct_intra(ecc_manager_intra, ecc_params_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False)
Correct an intra-field with its corresponding intra-ecc if necessary
5.126153
5.05455
1.014166
result = []
idset = set([id(x) for x in graph])
for n in graph:
    refset = set([id(x) for x in get_referents(n)])
    if refset.intersection(idset):
        result.append(n)
return result
def _eliminate_leafs(self, graph)
Eliminate leaf objects, i.e. objects that do not reference any other object in the list `graph`. Returns the list of objects without the objects identified as leafs.
3.532697
2.975695
1.187184
cycles = self.objects[:]
cnt = 0
while cnt != len(cycles):
    cnt = len(cycles)
    cycles = self._eliminate_leafs(cycles)
self.objects = cycles
return len(self.objects)
def _reduce_to_cycles(self)
Iteratively eliminate leafs to reduce the set of objects to only those that build cycles. Return the number of objects involved in reference cycles. If there are no cycles, `self.objects` will be an empty list and this method returns 0.
5.323651
3.442528
1.546436
if not self._reduced:
    reduced = copy(self)
    reduced.objects = self.objects[:]
    reduced.metadata = []
    reduced.edges = []
    self.num_in_cycles = reduced._reduce_to_cycles()
    reduced.num_in_cycles = self.num_in_cycles
    if self.num_in_cycles:
        reduced._get_edges()
        reduced._annotate_objects()
        for meta in reduced.metadata:
            meta.cycle = True
    else:
        reduced = None
    self._reduced = reduced
return self._reduced
def reduce_to_cycles(self)
Iteratively eliminate leafs to reduce the set of objects to only those that build cycles. Return the reduced graph. If there are no cycles, None is returned.
4.275597
4.063719
1.052139
idset = set([id(x) for x in self.objects]) self.edges = set([]) for n in self.objects: refset = set([id(x) for x in get_referents(n)]) for ref in refset.intersection(idset): label = '' members = None if isinstance(n, dict): members = n.items() if not members: members = named_refs(n) for (k, v) in members: if id(v) == ref: label = k break self.edges.add(_Edge(id(n), ref, label))
def _get_edges(self)
Compute the edges for the reference graph. The function returns a set of tuples (id(a), id(b), ref) if a references b with the referent 'ref'.
3.72508
3.45425
1.078405
g = {} for x in self.metadata: g[x.id] = x idx = 0 for x in self.metadata: if not hasattr(x, 'group'): x.group = idx idx += 1 neighbors = set() for e in self.edges: if e.src == x.id: neighbors.add(e.dst) if e.dst == x.id: neighbors.add(e.src) for nb in neighbors: g[nb].group = min(x.group, getattr(g[nb], 'group', idx)) # Assign the edges to the respective groups. Both "ends" of the edge # should share the same group so just use the first object's group. for e in self.edges: e.group = g[e.src].group self._max_group = idx
def _annotate_groups(self)
Annotate the objects belonging to separate (non-connected) graphs with individual indices.
3.543773
3.451128
1.026845
self.metadata = [x for x in self.metadata if x.group == group]
group_set = set([x.id for x in self.metadata])
self.objects = [obj for obj in self.objects if id(obj) in group_set]
self.count = len(self.metadata)
if self.metadata == []:
    return False
self.edges = [e for e in self.edges if e.group == group]
del self._max_group
return True
def _filter_group(self, group)
Eliminate all objects but those which belong to `group`. ``self.objects``, ``self.metadata`` and ``self.edges`` are modified. Returns `True` if the group is non-empty. Otherwise returns `False`.
3.689259
2.839517
1.299256
self._annotate_groups()
index = 0
for group in range(self._max_group):
    subgraph = copy(self)
    subgraph.metadata = self.metadata[:]
    subgraph.edges = self.edges.copy()
    if subgraph._filter_group(group):
        subgraph.total_size = sum([x.size for x in subgraph.metadata])
        subgraph.index = index
        index += 1
        yield subgraph
def split(self)
Split the graph into sub-graphs. Only connected objects belong to the same graph. `split` yields copies of the Graph object. Shallow copies are used that only replicate the meta-information, but share the same object list ``self.objects``. >>> from pympler.refgraph import ReferenceGraph >>> a = 42 >>> b = 'spam' >>> c = {a: b} >>> t = (1,2,3) >>> rg = ReferenceGraph([a,b,c,t]) >>> for subgraph in rg.split(): ... print subgraph.index 0 1
4.975585
5.736465
0.867361
graphs = list(self.split())
graphs.sort(key=lambda x: -len(x.metadata))
for index, graph in enumerate(graphs):
    graph.index = index
return graphs
def split_and_sort(self)
Split the graph into subgraphs and return a list of all subgraphs sorted by the number of nodes. The graph with the most nodes is returned first.
4.47843
4.207932
1.064283
self.metadata = [] sizer = Asizer() sizes = sizer.asizesof(*self.objects) self.total_size = sizer.total for obj, sz in zip(self.objects, sizes): md = _MetaObject() md.size = sz md.id = id(obj) try: md.type = obj.__class__.__name__ except (AttributeError, ReferenceError): # pragma: no cover md.type = type(obj).__name__ md.str = safe_repr(obj, clip=128) self.metadata.append(md)
def _annotate_objects(self)
Extract meta-data describing the stored objects.
4.721756
4.262881
1.107644
s = [] header = '// Process this file with graphviz\n' s.append( header) s.append('digraph G {\n') s.append(' node [shape=box];\n') for md in self.metadata: label = trunc(md.str, 48).replace('"', "'") extra = '' if md.type == 'instancemethod': extra = ', color=red' elif md.type == 'frame': extra = ', color=orange' s.append(' "X%s" [ label = "%s\\n%s" %s ];\n' % \ (hex(md.id)[1:], label, md.type, extra)) for e in self.edges: extra = '' if e.label == '__dict__': extra = ',weight=100' s.append(' X%s -> X%s [label="%s"%s];\n' % \ (hex(e.src)[1:], hex(e.dst)[1:], e.label, extra)) s.append('}\n') return "".join(s)
def _get_graphviz_data(self)
Emit a graph representing the connections between the objects described within the metadata list. The text representation can be transformed to a graph with graphviz. Returns a string.
3.534089
3.363711
1.050652
if self.objects == []: return False data = self._get_graphviz_data() options = ('-Nfontsize=10', '-Efontsize=10', '-Nstyle=filled', '-Nfillcolor=#E5EDB8', '-Ncolor=#CCCCCC') cmdline = (cmd, '-T%s' % format, '-o', filename) + options if unflatten: p1 = Popen(('unflatten', '-l7'), stdin=PIPE, stdout=PIPE, **popen_flags) p2 = Popen(cmdline, stdin=p1.stdout, **popen_flags) p1.communicate(encode4pipe(data)) p2.communicate() return p2.returncode == 0 else: p = Popen(cmdline, stdin=PIPE, **popen_flags) p.communicate(encode4pipe(data)) return p.returncode == 0
def render(self, filename, cmd='dot', format='ps', unflatten=False)
Render the graph to `filename` using graphviz. The graphviz invocation command may be overridden by specifying `cmd`. The `format` may be any specifier recognized by the graph renderer ('-Txxx' command). The graph can be preprocessed by the *unflatten* tool if the `unflatten` parameter is True. If there are no objects to illustrate, the method does not invoke graphviz and returns False. If the renderer returns successfully (return code 0), True is returned. An `OSError` is raised if the graphviz tool cannot be found.
3.156688
3.063149
1.030537
f = open(filename, 'w')
f.write(self._get_graphviz_data())
f.close()
def write_graph(self, filename)
Write raw graph data which can be post-processed using graphviz.
3.287791
2.952686
1.113492
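A small end-to-end sketch for ReferenceGraph rendering; it assumes the pympler.refgraph module layout and a graphviz 'dot' binary on the PATH, and the tracked objects are arbitrary examples:

from pympler.refgraph import ReferenceGraph

a, b = 42, 'spam'
c = {a: b}
rg = ReferenceGraph([a, b, c])
rg.render('refgraph.ps')        # invokes dot; returns False if there is nothing to draw
rg.write_graph('refgraph.dot')  # or just dump the DOT source for manual processing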
if not hasattr(self, '_root_frame'): self._root_frame = Frame() # define a recursive function that builds the hierarchy of frames given the # stack of frame identifiers def frame_for_stack(stack): if len(stack) == 0: return self._root_frame parent = frame_for_stack(stack[:-1]) frame_name = stack[-1] if not frame_name in parent.children_dict: parent.add_child(Frame(frame_name, parent)) return parent.children_dict[frame_name] for stack, self_time in self.stack_self_time.items(): frame_for_stack(stack).self_time = self_time return self._root_frame
def root_frame(self)
Returns the parsed results in the form of a tree of Frame objects
3.203909
2.86067
1.119986
global call_dict global call_stack global func_count global func_count_max global func_time global func_time_max global call_stack_timer call_dict = {} # current call stack call_stack = ['__main__'] # counters for each function func_count = {} func_count_max = 0 # accumative time per function func_time = {} func_time_max = 0 # keeps track of the start time of each call on the stack call_stack_timer = []
def reset_trace()
Resets all collected statistics. This is run automatically by start_trace(reset=True) and when the module is loaded.
4.002639
3.975758
1.006761
# TODO: Move these calls away from this function so it doesn't have to run
# every time.
lib_path = sysconfig.get_python_lib()
path = os.path.split(lib_path)
if path[1] == 'site-packages':
    lib_path = path[0]
return file_name.lower().startswith(lib_path.lower())
def is_module_stdlib(file_name)
Returns True if the file_name is in the lib directory.
4.09865
3.816414
1.073953
global trace_filter
global time_filter

if reset:
    reset_trace()

if filter_func:
    trace_filter = filter_func
else:
    trace_filter = GlobbingFilter(exclude=['pycallgraph.*'])

if time_filter_func:
    time_filter = time_filter_func
else:
    time_filter = GlobbingFilter()

sys.settrace(tracer)
def start_trace(reset=True, filter_func=None, time_filter_func=None)
Begins a trace. Setting reset to True will reset all previously recorded trace data. filter_func needs to point to a callable function that accepts the parameters (call_stack, module_name, class_name, func_name, full_name). Every call will be passed into this function and it is up to the function to decide if it should be included or not. Returning False means the call will be filtered out and not included in the call graph.
2.952655
2.952124
1.00018
global func_count_max global func_count global trace_filter global time_filter global call_stack global func_time global func_time_max if event == 'call': keep = True code = frame.f_code # Stores all the parts of a human readable name of the current call. full_name_list = [] # Work out the module name module = inspect.getmodule(code) if module: module_name = module.__name__ module_path = module.__file__ if not settings['include_stdlib'] \ and is_module_stdlib(module_path): keep = False if module_name == '__main__': module_name = '' else: module_name = '' if module_name: full_name_list.append(module_name) # Work out the class name. try: class_name = frame.f_locals['self'].__class__.__name__ full_name_list.append(class_name) except (KeyError, AttributeError): class_name = '' # Work out the current function or method func_name = code.co_name if func_name == '?': func_name = '__main__' full_name_list.append(func_name) # Create a readable representation of the current call full_name = '.'.join(full_name_list) # Load the trace filter, if any. 'keep' determines if we should ignore # this call if keep and trace_filter: keep = trace_filter(call_stack, module_name, class_name, func_name, full_name) # Store the call information if keep: if call_stack: fr = call_stack[-1] else: fr = None if fr not in call_dict: call_dict[fr] = {} if full_name not in call_dict[fr]: call_dict[fr][full_name] = 0 call_dict[fr][full_name] += 1 if full_name not in func_count: func_count[full_name] = 0 func_count[full_name] += 1 if func_count[full_name] > func_count_max: func_count_max = func_count[full_name] call_stack.append(full_name) call_stack_timer.append(time.time()) else: call_stack.append('') call_stack_timer.append(None) if event == 'return': if call_stack: full_name = call_stack.pop(-1) if call_stack_timer: t = call_stack_timer.pop(-1) else: t = None if t and time_filter(stack=call_stack, full_name=full_name): if full_name not in func_time: func_time[full_name] = 0 call_time = (time.time() - t) func_time[full_name] += call_time if func_time[full_name] > func_time_max: func_time_max = func_time[full_name] return tracer
def tracer(frame, event, arg)
This is an internal function that is called every time a call is made during a trace. It keeps track of relationships between calls.
2.34154
2.335372
1.002641
defaults = [] nodes = [] edges = [] # define default attributes for comp, comp_attr in graph_attributes.items(): attr = ', '.join( '%s = "%s"' % (attr, val) for attr, val in comp_attr.items() ) defaults.append( '\t%(comp)s [ %(attr)s ];\n' % locals() ) # define nodes for func, hits in func_count.items(): calls_frac, total_time_frac, total_time = _frac_calculation(func, hits) col = settings['node_colour'](calls_frac, total_time_frac) attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()] node_str = '"%s" [%s];' % (func, ', '.join(attribs)) nodes.append( node_str % locals() ) # define edges for fr_key, fr_val in call_dict.items(): if not fr_key: continue for to_key, to_val in fr_val.items(): calls_frac, total_time_frac, totla_time = \ _frac_calculation(to_key, to_val) col = settings['edge_colour'](calls_frac, total_time_frac) edge = '[ color = "%s", label="%s" ]' % (col, to_val) edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge)) defaults = '\n\t'.join( defaults ) nodes = '\n\t'.join( nodes ) edges = '\n\t'.join( edges ) dot_fmt = ("digraph G {\n" " %(defaults)s\n\n" " %(nodes)s\n\n" " %(edges)s\n}\n" ) return dot_fmt % locals()
def get_dot(stop=True)
Returns a string containing a DOT file. Setting stop to True will cause the trace to stop.
3.068514
3.040231
1.009303
ret = ['nodedef>name VARCHAR, label VARCHAR, hits INTEGER, ' + \ 'calls_frac DOUBLE, total_time_frac DOUBLE, ' + \ 'total_time DOUBLE, color VARCHAR, width DOUBLE'] for func, hits in func_count.items(): calls_frac, total_time_frac, total_time = _frac_calculation(func, hits) col = settings['node_colour'](calls_frac, total_time_frac) color = ','.join([str(round(float(c) * 255)) for c in col.split()]) ret.append('%s,%s,%s,%s,%s,%s,\'%s\',%s' % (func, func, hits, \ calls_frac, total_time_frac, total_time, color, \ math.log(hits * 10))) ret.append('edgedef>node1 VARCHAR, node2 VARCHAR, color VARCHAR') for fr_key, fr_val in call_dict.items(): if fr_key == '': continue for to_key, to_val in fr_val.items(): calls_frac, total_time_frac, total_time = \ _frac_calculation(to_key, to_val) col = settings['edge_colour'](calls_frac, total_time_frac) color = ','.join([str(round(float(c) * 255)) for c in col.split()]) ret.append('%s,%s,\'%s\'' % (fr_key, to_key, color)) ret = '\n'.join(ret) return ret
def get_gdf(stop=True)
Returns a string containing a GDF file. Setting stop to True will cause the trace to stop.
2.967586
2.891423
1.026341
if stop: stop_trace() dot_data = get_dot() # normalize filename regex_user_expand = re.compile('\A~') if regex_user_expand.match(filename): filename = os.path.expanduser(filename) else: filename = os.path.expandvars(filename) # expand, just in case if format == 'dot': f = open(filename, 'w') f.write(dot_data) f.close() else: # create a temporary file to be used for the dot data fd, tempname = tempfile.mkstemp() with os.fdopen(fd, 'w') as f: f.write(dot_data) cmd = '%(tool)s -T%(format)s -o%(filename)s %(tempname)s' % locals() try: ret = os.system(cmd) if ret: raise PyCallGraphException( \ 'The command "%(cmd)s" failed with error ' \ 'code %(ret)i.' % locals()) finally: os.unlink(tempname)
def make_dot_graph(filename, format='png', tool='dot', stop=True)
Creates a graph using a Graphviz tool that supports the dot language. It will output into a file specified by filename with the format specified. Setting stop to True will stop the current trace.
2.979197
3.066354
0.971576
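How these module-level functions fit together in the old pycallgraph 0.x style of use; the module name and output file are assumptions, and only start_trace()/make_dot_graph() as defined above are relied upon:

import pycallgraph

pycallgraph.start_trace()
run_my_code()                                # hypothetical workload to be traced
pycallgraph.make_dot_graph('callgraph.png')  # stops the trace and renders via the 'dot' tool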
if stop:
    stop_trace()
try:
    f = open(filename, 'w')
    f.write(get_gdf())
finally:
    if f:
        f.close()
def make_gdf_graph(filename, stop=True)
Create a graph in simple GDF format, suitable for feeding into Gephi, or some other graph manipulation and display tool. Setting stop to True will stop the current trace.
4.419555
3.649702
1.210936
cache = dict()

def wrapper(*rest):
    if rest not in cache:
        cache[rest] = callable_object(*rest)
    return cache[rest]

return wrapper
def simple_memoize(callable_object)
Simple memoization for functions without keyword arguments. This is useful for mapping code objects to modules in this context. inspect.getmodule() requires a number of system calls, which may slow down the tracing considerably. Caching the mapping from code objects to modules helps (there is *one* code object for each function, regardless of how many simultaneous activation records there are). In this context we can ignore keyword arguments, but a generic memoizer ought to take care of that as well.
3.322857
3.475592
0.956055
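A usage sketch matching the stated purpose (caching inspect.getmodule() for code objects); the name getmodule_cached is illustrative:

import inspect

getmodule_cached = simple_memoize(inspect.getmodule)
mod = getmodule_cached(tracer.__code__)   # first call performs the lookup
mod = getmodule_cached(tracer.__code__)   # second call is served from the cache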
import subprocess, sys
subprocess.call([sys.argv[0] + '32'] + sys.argv[1:],
                env={"VERSIONER_PYTHON_PREFER_32_BIT": "yes"})
def macshim()
Shim to run 32-bit on 64-bit mac as a sub-process
10.43334
8.033613
1.298711
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)

is_a_tty = hasattr(file_obj, 'isatty') and file_obj.isatty()
if not supported_platform or not is_a_tty:
    return False
return True
def file_supports_color(file_obj)
Returns True if the running system's terminal supports color, and False otherwise. Borrowed from Django https://github.com/django/django/blob/master/django/core/management/color.py
1.700361
1.765614
0.963042
'''Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is a helper function to easily compute the parameters from a resilience rate in order to instantiate an ECCMan object.'''
#message_size = max_block_size - int(round(max_block_size * rate * 2, 0)) # old way to compute, wasn't really correct because we applied the rate on the total message+ecc size, when we should apply the rate to the message size only (that is not known beforehand, but we want the ecc size (k) = 2*rate*message_size or in other words that k + k * 2 * rate = n)
message_size = int(round(float(max_block_size) / (1 + 2*rate), 0))
ecc_size = max_block_size - message_size
hash_size = len(hasher)  # 32 when we use MD5
return {"message_size": message_size, "ecc_size": ecc_size, "hash_size": hash_size}
def compute_ecc_params(max_block_size, rate, hasher)
Compute the ecc parameters (size of the message, size of the hash, size of the ecc). This is a helper function to easily compute the parameters from a resilience rate in order to instantiate an ECCMan object.
7.340345
4.590322
1.599092
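A worked example of the formula above; the hasher argument is assumed to be the project's hasher wrapper, whose len() gives the digest size:

params = compute_ecc_params(max_block_size=255, rate=0.2, hasher=hasher)
# message_size = round(255 / (1 + 2*0.2)) = round(182.14) = 182
# ecc_size     = 255 - 182                = 73
# hash_size    = len(hasher), e.g. 32 for an MD5-based hasher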
'''Encode one message block (up to 255) into an ecc'''
if not k:
    k = self.k
message, _ = self.pad(message, k=k)
if self.algo == 1:
    mesecc = self.ecc_manager.encode(message, k=k)
elif self.algo == 2:
    mesecc = self.ecc_manager.encode_fast(message, k=k)
elif self.algo == 3 or self.algo == 4:
    mesecc = rs_encode_msg(message, self.n-k, fcr=self.fcr, gen=self.g[self.n-k])
    #mesecc = rs_encode_msg_precomp(message, self.n-k, fcr=self.fcr, gen=self.g[self.n-k])

ecc = mesecc[len(message):]
return ecc
def encode(self, message, k=None)
Encode one message block (up to 255) into an ecc
3.944791
3.314589
1.19013
'''Automatically left pad a message with null bytes if it is too small, or leave it unchanged if not necessary. This makes it possible to keep track of the padding and to reliably strip the null bytes after decoding, even with binary data. Equivalent to shortening (shortened reed-solomon code).'''
if not k:
    k = self.k
pad = None
if len(message) < k:
    #pad = "\x00" * (k-len(message))
    pad = bytearray(k-len(message))
    message = pad + message
return [message, pad]
def pad(self, message, k=None)
Automatically left pad a message with null bytes if it is too small, or leave it unchanged if not necessary. This makes it possible to keep track of the padding and to reliably strip the null bytes after decoding, even with binary data. Equivalent to shortening (shortened reed-solomon code).
10.048771
2.14862
4.676848
'''Automatically right pad an ecc with null bytes to fill in for missing bytes if it is too small, or leave it unchanged if not necessary. This can be used as a workaround for field delimiter misdetection. Equivalent to puncturing (punctured reed-solomon code).'''
if not k:
    k = self.k
pad = None
if len(ecc) < self.n-k:
    print("Warning: the ecc field may have been truncated (entrymarker or field_delim misdetection?).")
    #pad = "\x00" * (self.n-k-len(ecc))
    pad = bytearray(self.n-k-len(ecc))
    ecc = ecc + pad
return [ecc, pad]
def rpad(self, ecc, k=None)
Automatically right pad an ecc with null bytes to fill in for missing bytes if it is too small, or leave it unchanged if not necessary. This can be used as a workaround for field delimiter misdetection. Equivalent to puncturing (punctured reed-solomon code).
11.926827
3.502522
3.405212
'''Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes, to detect if the message was tampered with, or after decoding to check that the message was fully recovered.'''
if not k:
    k = self.k
message, _ = self.pad(message, k=k)
ecc, _ = self.rpad(ecc, k=k)
if self.algo == 1 or self.algo == 2:
    return self.ecc_manager.check_fast(message + ecc, k=k)
elif self.algo == 3 or self.algo == 4:
    return reedsolo.rs_check(bytearray(message + ecc), self.n-k, fcr=self.fcr, generator=self.gen_nb)
def check(self, message, ecc, k=None)
Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes, to detect if the message was tampered with, or after decoding to check that the message was fully recovered.
6.928394
3.520764
1.967867
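A sketch of the typical encode/check round trip on one block; ecc_manager stands for an already constructed ECCMan instance and block for one message block of at most message_size bytes:

ecc = ecc_manager.encode(block)            # per-block ecc, see encode() above
assert ecc_manager.check(block, ecc)       # untouched block: check() is True
corrupted = b'X' + block[1:]
ecc_manager.check(corrupted, ecc)          # -> False, the block needs decode() to be repaired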
'''Provide a description for each available algorithm, useful for printing in the ecc file'''
if 0 < self.algo <= 3:
    return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
elif self.algo == 4:
    return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) under US FAA ADSB UAT RS FEC standard with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
else:
    return "No description for this ECC algorithm."
def description(self)
Provide a description for each available algorithm, useful for printing in the ecc file
7.039098
5.340594
1.318037
if fn is None: # @profile() syntax -- we are a decorator maker def decorator(fn): return profile(fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries, profiler=profiler) return decorator # @profile syntax -- we are a decorator. if isinstance(profiler, str): profiler = [profiler] for p in profiler: if p in AVAILABLE_PROFILERS: profiler_class = AVAILABLE_PROFILERS[p] break else: raise ValueError('only these profilers are available: %s' % ', '.join(AVAILABLE_PROFILERS)) fp = profiler_class(fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries) # fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...) # or HotShotFuncProfile # We cannot return fp or fp.__call__ directly as that would break method # definitions, instead we need to return a plain function. def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False, sort=None, entries=40, profiler=('cProfile', 'profile', 'hotshot'))
Mark `fn` for profiling. If `skip` is > 0, first `skip` calls to `fn` will not be profiled. If `immediate` is False, profiling results will be printed to sys.stdout on program termination. Otherwise results will be printed after each call. If `dirs` is False only the name of the file will be printed. Otherwise the full path is used. `sort` can be a list of sort keys (defaulting to ['cumulative', 'time', 'calls']). The following ones are recognized:: 'calls' -- call count 'cumulative' -- cumulative time 'file' -- file name 'line' -- line number 'module' -- file name 'name' -- function name 'nfl' -- name/file/line 'pcalls' -- call count 'stdname' -- standard name 'time' -- internal time `entries` limits the output to the first N entries. `profiler` can be used to select the preferred profiler, or specify a sequence of them, in order of preference. The default is ('cProfile', 'profile', 'hotshot'). If `filename` is specified, the profile stats will be stored in the named file. You can load them with pstats.Stats(filename). Usage:: def fn(...): ... fn = profile(fn, skip=1) If you are using Python 2.4, you should be able to use the decorator syntax:: @profile(skip=3) def fn(...): ... or just :: @profile def fn(...): ...
2.916422
3.280879
0.888915
fp = TraceFuncCoverage(fn)  # or HotShotFuncCoverage

# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
    return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
def coverage(fn)
Mark `fn` for line coverage analysis. Results will be printed to sys.stdout on program termination. Usage:: def fn(...): ... fn = coverage(fn) If you are using Python 2.4, you should be able to use the decorator syntax:: @coverage def fn(...): ...
4.869076
5.29897
0.918872
fp = HotShotFuncCoverage(fn)

# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
    return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
def coverage_with_hotshot(fn)
Mark `fn` for line coverage analysis. Uses the 'hotshot' module for fast coverage analysis. BUG: Produces inaccurate results. See the docstring of `coverage` for usage examples.
3.815768
4.194151
0.909783
if fn is None: # @timecall() syntax -- we are a decorator maker def decorator(fn): return timecall(fn, immediate=immediate, timer=timer) return decorator # @timecall syntax -- we are a decorator. fp = FuncTimer(fn, immediate=immediate, timer=timer) # We cannot return fp or fp.__call__ directly as that would break method # definitions, instead we need to return a plain function. def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn
def timecall(fn=None, immediate=True, timer=time.time)
Wrap `fn` and print its execution time. Example:: @timecall def somefunc(x, y): time.sleep(x * y) somefunc(2, 3) will print the time taken by somefunc on every call. If you want just a summary at program termination, use @timecall(immediate=False) You can also choose a timing method other than the default ``time.time()``, e.g.: @timecall(timer=time.clock)
3.454685
4.546335
0.759884
funcname = self.fn.__name__ filename = self.fn.__code__.co_filename lineno = self.fn.__code__.co_firstlineno print("") print("*** PROFILER RESULTS ***") print("%s (%s:%s)" % (funcname, filename, lineno)) if self.skipped: skipped = "(%d calls not profiled)" % self.skipped else: skipped = "" print("function called %d times%s" % (self.ncalls, skipped)) print("") stats = self.stats if self.filename: stats.dump_stats(self.filename) if not self.dirs: stats.strip_dirs() stats.sort_stats(*self.sort) stats.print_stats(self.entries)
def print_stats(self)
Print profile information to sys.stdout.
3.262795
3.081722
1.058757
# Note: not using self.Profile, since pstats.Stats() fails then
self.stats = pstats.Stats(Profile())
self.ncalls = 0
self.skipped = 0
def reset_stats(self)
Reset accumulated profiler statistics.
12.553613
10.58963
1.185463
funcname = self.fn.__name__ filename = self.fn.__code__.co_filename lineno = self.fn.__code__.co_firstlineno print("") print("*** COVERAGE RESULTS ***") print("%s (%s:%s)" % (funcname, filename, lineno)) print("function called %d times" % self.ncalls) print("") fs = FuncSource(self.fn) for (filename, lineno), count in self.tracer.counts.items(): if filename != fs.filename: continue fs.mark(lineno, count) print(fs) never_executed = fs.count_never_executed() if never_executed: print("%d lines were not executed." % never_executed)
def atexit(self)
Stop profiling and print profile information to sys.stderr. This function is registered as an atexit hook.
3.743369
3.75212
0.997668
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
self.firstcodelineno = sys.maxint
for lineno in lines:
    self.firstcodelineno = min(self.firstcodelineno, lineno)
    self.sourcelines.setdefault(lineno, 0)
if self.firstcodelineno == sys.maxint:
    self.firstcodelineno = self.firstlineno
def find_source_lines(self)
Mark all executable source lines in fn as executed 0 times.
4.207219
3.842911
1.0948
self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
def mark(self, lineno, count=1)
Mark a given source line as executed count times. Multiple calls to mark for the same lineno add up.
3.665524
3.138519
1.167915
lineno = self.firstlineno
counter = 0
for line in self.source:
    if self.sourcelines.get(lineno) == 0:
        if not self.blank_rx.match(line):
            counter += 1
    lineno += 1
return counter
def count_never_executed(self)
Count statements that were never executed.
4.902187
4.77546
1.026537
_maxes, _lists = self._maxes, self._lists
if _maxes:
    pos = bisect_right(_maxes, val)
    if pos == len(_maxes):
        pos -= 1
        _maxes[pos] = val
        _lists[pos].append(val)
    else:
        insort(_lists[pos], val)
    self._expand(pos)
else:
    _maxes.append(val)
    _lists.append([val])
self._len += 1
def add(self, val)
Add the element *val* to the list.
2.942898
2.79357
1.053454
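This body matches the sorted-list-of-sublists layout used by the sortedcontainers package; a small usage sketch assuming that package is installed:

# Usage sketch assuming the sortedcontainers package.
from sortedcontainers import SortedList

sl = SortedList([3, 1, 2])
sl.add(0)          # inserted into the correct sublist, keeping order
print(list(sl))    # [0, 1, 2, 3]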
_maxes = self._maxes
if not _maxes:
    raise ValueError('{0} not in list'.format(repr(val)))
pos = bisect_left(_maxes, val)
if pos == len(_maxes):
    raise ValueError('{0} not in list'.format(repr(val)))
_lists = self._lists
idx = bisect_left(_lists[pos], val)
if _lists[pos][idx] == val:
    self._delete(pos, idx)
else:
    raise ValueError('{0} not in list'.format(repr(val)))
def remove(self, val)
Remove first occurrence of *val*. Raises ValueError if *val* is not present.
2.606142
2.459308
1.059706
_maxes, _lists, _index = self._maxes, self._lists, self._index
lists_pos = _lists[pos]
del lists_pos[idx]
self._len -= 1
len_lists_pos = len(lists_pos)
if len_lists_pos > self._half:
    _maxes[pos] = lists_pos[-1]
    if _index:
        child = self._offset + pos
        while child > 0:
            _index[child] -= 1
            child = (child - 1) >> 1
        _index[0] -= 1
elif len(_lists) > 1:
    if not pos:
        pos += 1
    prev = pos - 1
    _lists[prev].extend(_lists[pos])
    _maxes[prev] = _lists[prev][-1]
    del _maxes[pos]
    del _lists[pos]
    del _index[:]
    self._expand(prev)
elif len_lists_pos:
    _maxes[pos] = lists_pos[-1]
else:
    del _maxes[pos]
    del _lists[pos]
    del _index[:]
def _delete(self, pos, idx)
Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc.
3.009346
2.927037
1.02812
_maxes, _lists, _load = self._maxes, self._lists, self._load
if not isinstance(values, list):
    values = list(values)
if any(values[pos - 1] > values[pos] for pos in range(1, len(values))):
    raise ValueError('given sequence not in sort order')
offset = 0
if _maxes:
    if values[0] < _lists[-1][-1]:
        msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
        raise ValueError(msg)
    if len(_lists[-1]) < self._half:
        _lists[-1].extend(values[:_load])
        _maxes[-1] = _lists[-1][-1]
        offset = _load
len_lists = len(_lists)
for idx in range(offset, len(values), _load):
    _lists.append(values[idx:(idx + _load)])
    _maxes.append(_lists[-1][-1])
_index = self._index
if len_lists == len(_lists):
    len_index = len(_index)
    if len_index > 0:
        len_values = len(values)
        child = len_index - 1
        while child:
            _index[child] += len_values
            child = (child - 1) >> 1
        _index[0] += len_values
else:
    del _index[:]
self._len += len(values)
def extend(self, values)
Extend the list by appending all elements from the *values*. Raises a ValueError if the sort order would be violated.
3.368855
3.19166
1.055518
_maxes, _lists, _len = self._maxes, self._lists, self._len
if idx < 0:
    idx += _len
if idx < 0:
    idx = 0
if idx > _len:
    idx = _len
if not _maxes:
    # The idx must be zero by the inequalities above.
    _maxes.append(val)
    _lists.append([val])
    self._len = 1
    return
if not idx:
    if val > _lists[0][0]:
        msg = '{0} not in sort order at index {1}'.format(repr(val), 0)
        raise ValueError(msg)
    else:
        _lists[0].insert(0, val)
        self._expand(0)
        self._len += 1
        return
if idx == _len:
    pos = len(_lists) - 1
    if _lists[pos][-1] > val:
        msg = '{0} not in sort order at index {1}'.format(repr(val), _len)
        raise ValueError(msg)
    else:
        _lists[pos].append(val)
        _maxes[pos] = _lists[pos][-1]
        self._expand(pos)
        self._len += 1
        return
pos, idx = self._pos(idx)
idx_before = idx - 1
if idx_before < 0:
    pos_before = pos - 1
    idx_before = len(_lists[pos_before]) - 1
else:
    pos_before = pos
before = _lists[pos_before][idx_before]
if before <= val <= _lists[pos][idx]:
    _lists[pos].insert(idx, val)
    self._expand(pos)
    self._len += 1
else:
    msg = '{0} not in sort order at index {1}'.format(repr(val), idx)
    raise ValueError(msg)
def insert(self, idx, val)
Insert the element *val* into the list at *idx*. Raises a ValueError if the *val* at *idx* would violate the sort order.
2.245786
2.19273
1.024196
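A standalone sketch of the neighbour check this method performs before inserting: the new value must sit between the element before and the element at the requested index. The helper name and the plain-list backing are hypothetical, for illustration only.

# Standalone illustration of the sort-order check (not the library itself).
def checked_insert(lst, idx, val):
    before = lst[idx - 1] if idx > 0 else val
    after = lst[idx] if idx < len(lst) else val
    if not (before <= val <= after):
        raise ValueError('{0} not in sort order at index {1}'.format(repr(val), idx))
    lst.insert(idx, val)

data = [10, 20, 30]
checked_insert(data, 1, 15)    # fine: 10 <= 15 <= 20
print(data)                    # [10, 15, 20, 30]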
_maxes, _lists, _keys = self._maxes, self._lists, self._keys
values = sorted(iterable, key=self._key)
if _maxes:
    if len(values) * 4 >= self._len:
        values.extend(chain.from_iterable(_lists))
        values.sort(key=self._key)
        self._clear()
    else:
        _add = self.add
        for val in values:
            _add(val)
        return
_load, _index = self._load, self._index
_lists.extend(values[pos:(pos + _load)]
              for pos in range(0, len(values), _load))
_keys.extend(list(map(self._key, _list)) for _list in _lists)
_maxes.extend(sublist[-1] for sublist in _keys)
self._len = len(values)
del _index[:]
def update(self, iterable)
Update the list by adding all elements from *iterable*.
3.766227
3.611745
1.042772
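This body uses a per-element key, matching the keyed variant of sortedcontainers (SortedKeyList in recent releases, SortedListWithKey in older ones); a hedged usage sketch assuming a recent release:

# Usage sketch assuming sortedcontainers' SortedKeyList.
from sortedcontainers import SortedKeyList

skl = SortedKeyList(key=len)        # order strings by their length
skl.update(["ccc", "a", "bb"])      # bulk load, re-sorted by the key
print(list(skl))                    # ['a', 'bb', 'ccc']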
if self.get_mode() == "ibm":
    self.__username_ibm = username_ibm
else:
    raise Exception(
        "Mode is {}, whereas it must be `ibm`".format(self.get_mode()))
def set_username_ibm(self, username_ibm)
Parameters ---------- username_ibm : str Raises ------ Exception If mode is not `ibm`
5.7236
4.831472
1.184649
if self.get_mode() == "ibm":
    self.__password_ibm = password_ibm
else:
    raise Exception(
        "Mode is {}, whereas it must be `ibm`".format(self.get_mode()))
def set_password_ibm(self, password_ibm)
Parameters ---------- password_ibm : str Raises ------ Exception If mode is not `ibm`
4.371972
3.772943
1.15877
audio_files = list()
for possibly_audio_file in os.listdir("{}/{}".format(self.src_dir, sub_dir)):
    file_format = ''.join(possibly_audio_file.split('.')[-1])
    if file_format.lower() == "wav":
        audio_files.append(possibly_audio_file)
return audio_files
def _list_audio_files(self, sub_dir="")
Parameters ---------- sub_dir : one of `needed_directories`, optional Default is "", which means it'll look through all of subdirs. Returns ------- audio_files : [str] A list whose elements are basenames of the present audiofiles whose formats are `wav`
3.028639
3.316992
0.913068
channel_num = int(
    subprocess.check_output(
        (
        ).format(audio_abs_path, "Channels"),
        shell=True,
        universal_newlines=True).rstrip())
return channel_num
def _get_audio_channels(self, audio_abs_path)
Parameters ---------- audio_abs_path : str Returns ------- channel_num : int
5.976933
5.781182
1.03386
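The shell-command string literal is elided in this and the following entries, so only the field name ("Channels", "Sample Rate", ...) survives. A plausible standalone reconstruction, which is an assumption and not the original code, queries sox's info output and greps the requested field:

# Hypothetical equivalent; the exact command string of the original source
# is not preserved in this dataset, so this sox invocation is an assumption.
import subprocess

def get_audio_channels(audio_abs_path):
    out = subprocess.check_output(
        "sox --i {} | grep 'Channels' | awk -F':' '{{print $2}}'".format(
            audio_abs_path),
        shell=True, universal_newlines=True)
    return int(out.strip())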
sample_rate = int(
    subprocess.check_output(
        (
        ).format(audio_abs_path, "Sample Rate"),
        shell=True,
        universal_newlines=True).rstrip())
return sample_rate
def _get_audio_sample_rate(self, audio_abs_path)
Parameters ---------- audio_abs_path : str Returns ------- sample_rate : int
6.311676
6.555768
0.962767
sample_bit = int(
    subprocess.check_output(
        (
        ).format(audio_abs_path, "Precision"),
        shell=True,
        universal_newlines=True).rstrip())
return sample_bit
def _get_audio_sample_bit(self, audio_abs_path)
Parameters ---------- audio_abs_path : str Returns ------- sample_bit : int
8.015084
8.635228
0.928184
HHMMSS_duration = subprocess.check_output(
    (
    ).format(audio_abs_path, "Duration"),
    shell=True,
    universal_newlines=True).rstrip()
total_seconds = sum(
    [float(x) * 60 ** (2 - i)
     for i, x in enumerate(HHMMSS_duration.split(":"))])
return total_seconds
def _get_audio_duration_seconds(self, audio_abs_path)
Parameters ---------- audio_abs_path : str Returns ------- total_seconds : int
4.126971
4.050694
1.018831
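A worked example of the HH:MM:SS-to-seconds arithmetic used above (the sample duration string is made up):

# Each colon-separated field is weighted by 60 ** (2 - position).
duration = "01:02:03.50"
total_seconds = sum(
    float(x) * 60 ** (2 - i) for i, x in enumerate(duration.split(":")))
print(total_seconds)   # 3723.5  (1*3600 + 2*60 + 3.5)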
bit_Rate_formatted = subprocess.check_output(
    .format(audio_abs_path, "Bit Rate"),
    shell=True,
    universal_newlines=True).rstrip()
bit_rate = (lambda x: int(x[:-1]) * 10 ** 3 if x[-1].lower() == "k" else
            int(x[:-1]) * 10 ** 6 if x[-1].lower() == "m" else
            int(x[:-1]) * 10 ** 9 if x[-1].lower() == "g" else
            int(x))(bit_Rate_formatted)
return bit_rate
def _get_audio_bit_rate(self, audio_abs_path)
Parameters ----------- audio_abs_path : str Returns ------- bit_rate : int
2.818786
2.906589
0.969792
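The chained lambda above handles the k/M/G suffixes sox may print. An unrolled equivalent (the helper name is hypothetical) makes the same decimal multipliers explicit:

# Same suffix handling as the lambda, written out as a plain function.
def parse_bit_rate(formatted):
    suffix = formatted[-1].lower()
    if suffix == "k":
        return int(formatted[:-1]) * 10 ** 3
    if suffix == "m":
        return int(formatted[:-1]) * 10 ** 6
    if suffix == "g":
        return int(formatted[:-1]) * 10 ** 9
    return int(formatted)

print(parse_bit_rate("256k"))   # 256000
print(parse_bit_rate("2M"))     # 2000000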
less_than_second = seconds - floor(seconds)
minutes, seconds = divmod(floor(seconds), 60)
hours, minutes = divmod(minutes, 60)
return "{}H{}M{}S.{}".format(hours, minutes, seconds, less_than_second)
def _seconds_to_HHMMSS(seconds)
Returns a string which is the hour, minute, second (milli) representation of the input `seconds`

Parameters
----------
seconds : float

Returns
-------
str
    Has the form <int>H<int>M<int>S.<float>
3.017859
2.952787
1.022038
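A worked example of the divmod arithmetic above (Python 3 assumed, where math.floor returns an int; the input value is made up):

from math import floor

seconds = 3723.5
fractional = seconds - floor(seconds)        # 0.5
minutes, secs = divmod(floor(seconds), 60)   # 62, 3
hours, minutes = divmod(minutes, 60)         # 1, 2
print("{}H{}M{}S.{}".format(hours, minutes, secs, fractional))  # 1H2M3S.0.5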
subprocess.Popen(["sox", str(audio_abs_path), str(segment_abs_path),
                  "trim", str(starting_second), str(duration)],
                 universal_newlines=True).communicate()
def _audio_segment_extractor(self, audio_abs_path, segment_abs_path, starting_second, duration)
Parameters ----------- audio_abs_path : str segment_abs_path : str starting_second : int duration : int
3.003312
2.897398
1.036555
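An equivalent one-off invocation, assuming sox is installed; the file names are hypothetical. `trim <start> <length>` cuts a window out of the input:

# Extract 30 seconds starting at second 60 from in.wav into out.wav.
import subprocess

subprocess.Popen(
    ["sox", "in.wav", "out.wav", "trim", "60", "30"],
    universal_newlines=True).communicate()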
total_seconds = self._get_audio_duration_seconds(audio_abs_path)
current_segment = 0
while current_segment <= total_seconds // duration_seconds + 1:
    if current_segment + duration_seconds > total_seconds:
        ending_second = total_seconds
    else:
        ending_second = current_segment + duration_seconds
    self._audio_segment_extractor(
        audio_abs_path,
        results_abs_path.replace("*", "{:03d}".format(current_segment)),
        starting_second=current_segment,
        duration=(ending_second - current_segment))
    current_segment += 1
def _split_audio_by_duration(self, audio_abs_path, results_abs_path, duration_seconds)
Calculates the length of each segment and passes it to self._audio_segment_extractor

Parameters
----------
audio_abs_path : str
results_abs_path : str
    A place for adding digits needs to be added prior to the format
    declaration, i.e. name%03.wav. Here, we've added `*` at the staging
    step, which we'll replace.
duration_seconds : int
2.612425
2.37091
1.101866
sample_rate = self._get_audio_sample_rate(audio_abs_path)
sample_bit = self._get_audio_sample_bit(audio_abs_path)
channel_num = self._get_audio_channels(audio_abs_path)
duration = 8 * chunk_size / reduce(lambda x, y: int(x) * int(y),
                                   [sample_rate, sample_bit, channel_num])
self._split_audio_by_duration(audio_abs_path, results_abs_path, duration)
def _split_audio_by_size(self, audio_abs_path, results_abs_path, chunk_size)
Calculates the duration of name.wav so that all splits have the size of
chunk_size, except possibly the last split (which will be smaller), and
then passes the duration to `_split_audio_by_duration`

Parameters
----------
audio_abs_path : str
results_abs_path : str
    A place for adding digits needs to be added prior to the format
    declaration, i.e. name%03.wav
chunk_size : int
    Should be in bytes
2.292532
2.449315
0.935989
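A worked example of the duration formula above: uncompressed PCM produces sample_rate * sample_bit * channels bits per second, so a chunk of chunk_size bytes holds 8 * chunk_size of those bits (the concrete numbers below are made up):

# 16 kHz, 16-bit, mono -> 256000 bits per second of audio.
chunk_size = 100 * 10 ** 6          # bytes per chunk
sample_rate, sample_bit, channels = 16000, 16, 1
duration = 8 * chunk_size / (sample_rate * sample_bit * channels)
print(duration)                     # 3125.0 seconds per chunk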
name = ''.join(basename.split('.')[:-1])
# May cause problems if wav is not less than 9 channels.
if basename.split('.')[-1] == "wav":
    if self.get_verbosity():
        print("Found wave! Copying to {}/filtered/{}".format(
            self.src_dir, basename))
    subprocess.Popen(["cp", "{}/{}.wav".format(self.src_dir, name),
                      "{}/filtered/{}.wav".format(self.src_dir, name)],
                     universal_newlines=True).communicate()
def _filtering_step(self, basename)
Moves the audio file if the format is `wav` to `filtered` directory. Parameters ---------- basename : str A basename of `/home/random-guy/some-audio-file.wav` is `some-audio-file.wav`
5.208972
5.011836
1.039334
name = ''.join(basename.split('.')[:-1])
if self.get_mode() == "ibm":
    # Checks the file size. It's better to use 95% of the allocated
    # size per file since the upper limit is not always respected.
    total_size = os.path.getsize("{}/filtered/{}.wav".format(
        self.src_dir, name))
    if total_size >= self.ibm_api_limit_bytes:
        if self.get_verbosity():
            print(("{}'s size over API limit ({}). Splitting").format(
                name, self.ibm_api_limit_bytes))
        self._split_audio_by_size(
            "{}/filtered/{}.wav".format(self.src_dir, name),
            "{}/staging/{}*.wav".format(self.src_dir, name),
            self.ibm_api_limit_bytes * 95 / 100)
    else:
        if self.get_verbosity():
            print("{}'s size is fine. Moving to staging dir".format(name))
        subprocess.Popen((
            "mv {}/filtered/{}.wav {}/staging/{}000.wav").format(
                self.src_dir, name, self.src_dir, name),
            shell=True,
            universal_newlines=True).communicate()
elif self.get_mode() == "cmu":
    if self.get_verbosity():
        print("Converting {} to a readable wav".format(basename))
    # Check for the executable before taking its basename, otherwise the
    # None case could never be reached.
    ffmpeg = find_executable("ffmpeg") or find_executable("avconv")
    if ffmpeg is None:
        raise Exception(("Either ffmpeg or avconv is needed. "
                         "Neither is installed or accessible"))
    ffmpeg = os.path.basename(ffmpeg)
    try:
        # ffmpeg log levels:
        # https://ffmpeg.org/ffmpeg.html#Generic-options
        ffmpeg_log_level = "8"  # fatal errors.
        if self.get_verbosity():
            ffmpeg_log_level = "32"  # info (default for ffmpeg)
        subprocess.check_call([
            str(ffmpeg), "-y", "-i",
            "{}/filtered/{}.wav".format(self.src_dir, str(name)),
            "-acodec", "pcm_s16le", "-ac", "1", "-ar", "16000",
            "{}/staging/{}000.wav".format(self.src_dir, name),
            "-v", ffmpeg_log_level],
            universal_newlines=True)
    except subprocess.CalledProcessError as e:
        print(e)
    if os.path.exists("{}/staging/{}000.wav".format(self.src_dir, name)):
        if self.get_verbosity():
            print(("{}/filtered/{} was converted to "
                   "{}/staging/{}000.wav Now removing the copy of "
                   "{} in filtered sub directory").format(
                       self.src_dir, basename, self.src_dir, name, basename))
        subprocess.Popen([
            "rm", "{}/filtered/{}".format(self.src_dir, basename)],
            universal_newlines=True).communicate()
    else:
        raise Exception("Something went wrong with ffmpeg conversion!")
def _staging_step(self, basename)
Checks the size of audio file, splits it if it's needed to manage api limit and then moves to `staged` directory while appending `*` to the end of the filename for self.split_audio_by_duration to replace it by a number. Parameters ---------- basename : str A basename of `/home/random-guy/some-audio-file.wav` is `some-audio-file.wav`
3.460738
3.293924
1.050643
if basename is not None:
    if basename in self.get_timestamps():
        if self.get_verbosity():
            print("File specified was already indexed. Reindexing...")
        del self.__timestamps[basename]
    self._filtering_step(basename)
    self._staging_step(basename)
else:
    for audio_basename in self._list_audio_files():
        if audio_basename in self.__timestamps:
            if replace_already_indexed:
                if self.get_verbosity():
                    print("Already indexed {}. Reindexing...".format(
                        audio_basename))
                del self.__timestamps[audio_basename]
            else:
                if self.get_verbosity():
                    print("Already indexed {}. Skipping...".format(
                        audio_basename))
                continue
        self._filtering_step(audio_basename)
        self._staging_step(audio_basename)
def _prepare_audio(self, basename, replace_already_indexed=False)
Prepares and stages the audio file to be indexed. Parameters ---------- basename : str, None A basename of `/home/random-guy/some-audio-file.wav` is `some-audio-file.wav` If basename is `None`, it'll prepare all the audio files.
2.813772
2.841329
0.990302
self._prepare_audio(basename=basename,
                    replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(sub_dir="staging"):
    original_audio_name = ''.join(
        staging_audio_basename.split('.')[:-1])[:-3]
    pocketsphinx_command = ''.join([
        "pocketsphinx_continuous", "-infile",
        str("{}/staging/{}".format(self.src_dir, staging_audio_basename)),
        "-time", "yes", "-logfn", "/dev/null"])
    try:
        if self.get_verbosity():
            print("Now indexing {}".format(staging_audio_basename))
        output = subprocess.check_output([
            "pocketsphinx_continuous", "-infile",
            str("{}/staging/{}".format(self.src_dir,
                                       staging_audio_basename)),
            "-time", "yes", "-logfn", "/dev/null"],
            universal_newlines=True).split('\n')
        str_timestamps_with_sil_conf = list(map(
            lambda x: x.split(" "), filter(None, output[1:])))
        # Timestamps are put in a list of a single element to match
        # Watson's output.
        self.__timestamps_unregulated[
            original_audio_name + ".wav"] = [(
                self._timestamp_extractor_cmu(
                    staging_audio_basename,
                    str_timestamps_with_sil_conf))]
        if self.get_verbosity():
            print("Done indexing {}".format(staging_audio_basename))
    except OSError as e:
        if self.get_verbosity():
            print(e, "The command was: {}".format(pocketsphinx_command))
        self.__errors[(time(), staging_audio_basename)] = e
self._timestamp_regulator()
if self.get_verbosity():
    print("Finished indexing procedure")
def _index_audio_cmu(self, basename=None, replace_already_indexed=False)
Indexes audio with pocketsphinx. Beware that the output would not be sufficiently accurate. Use this only if you don't want to upload your files to IBM. Parameters ----------- basename : str, optional A specific basename to be indexed and is placed in src_dir E.g. `audio.wav`. If `None` is selected, all the valid audio files would be indexed. Default is `None`. Raises ------ OSError If the output of pocketsphinx command results in an error.
4.277213
4.095264
1.044429
filter_untimed = filter(lambda x: len(x) == 4,
                        str_timestamps_with_sil_conf)
if filter_untimed != str_timestamps_with_sil_conf:
    self.__errors[
        (time(), staging_audio_basename)] = str_timestamps_with_sil_conf
str_timestamps = [
    str_timestamp[:-1] for str_timestamp in filter_untimed
    if not any([letter in {"<", ">", "/"}
                for letter in ''.join(str_timestamp)])]
timestamps = list([
    _WordBlock(
        word=re.findall("^[^\(]+", x[0])[0],
        start=round(float(x[1]), 2),
        end=round(float(x[2]), 2))
    for x in str_timestamps])
return timestamps
def _timestamp_extractor_cmu(self, staging_audio_basename, str_timestamps_with_sil_conf)
Parameters ---------- str_timestamps_with_sil_conf : [[str, str, str, str]] Of the form [[word, starting_sec, ending_sec, confidence]] Returns ------- timestamps : [[str, float, float]]
4.305941
4.324192
0.995779
try:
    timestamps_of_sentences = [
        audio_json['results'][i]['alternatives'][0]['timestamps']
        for i in range(len(audio_json['results']))]
    return [
        _WordBlock(
            word=word_block[0],
            start=round(float(word_block[1]), 2),
            end=round(float(word_block[2]), 2))
        for sentence_block in timestamps_of_sentences
        for word_block in sentence_block]
except KeyError:
    self.__errors[(time(), staging_audio_basename)] = audio_json
    if self.get_verbosity():
        print(audio_json)
        print("The resulting request from Watson was unintelligible.")
    return False
def _timestamp_extractor_ibm(self, staging_audio_basename, audio_json)
Parameters
----------
audio_json : {str: [{str: [{str: str or numeric}]}]}
    Refer to the Watson Speech API reference [1]_

Returns
-------
[[str, float, float]]
    A list whose members are lists. Each member list has three elements.
    The first one is a word, the second is the starting second and the
    third is the ending second of that word in the original audio file.
4.313477
4.16611
1.035373
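A standalone sketch of the JSON shape this extractor walks; the sample payload below is made up, with field names following the Watson speech-to-text response format:

sample = {
    "results": [
        {"alternatives": [
            {"timestamps": [["hello", 0.0, 0.41], ["world", 0.55, 1.02]]}
        ]}
    ]
}
words = [
    (word, round(float(start), 2), round(float(end), 2))
    for result in sample["results"]
    for (word, start, end) in result["alternatives"][0]["timestamps"]]
print(words)   # [('hello', 0.0, 0.41), ('world', 0.55, 1.02)]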
with _Subdirectory_Managing_Decorator(
        self.src_dir, self._needed_directories):
    if self.get_mode() == "ibm":
        self._index_audio_ibm(*args, **kwargs)
    elif self.get_mode() == "cmu":
        self._index_audio_cmu(*args, **kwargs)
def index_audio(self, *args, **kwargs)
Calls the correct indexer function based on the mode.

If mode is `ibm`, _index_audio_ibm is called, which is an interface for
Watson. Note that some of the explanation of _index_audio_ibm's
arguments is from [1]_

If mode is `cmu`, _index_audio_cmu is called, which is an interface for
PocketSphinx. Beware that the output would not be sufficiently accurate.
Use this only if you don't want to upload your files to IBM.

Parameters
----------
mode : {"ibm", "cmu"}
basename : str, optional
    A specific basename to be indexed and is placed in src_dir e.g.
    `audio.wav`. If `None` is selected, all the valid audio files would
    be indexed. Default is `None`.
replace_already_indexed : bool
    `True`, to reindex some audio file that's already in the timestamps.
    Default is `False`.
continuous : bool
    Valid only if mode is `ibm`.
    Indicates whether multiple final results that represent consecutive
    phrases separated by long pauses are returned. If true, such phrases
    are returned; if false (the default), recognition ends after the
    first end-of-speech (EOS) incident is detected. Default is `True`.
model : {
        'ar-AR_BroadbandModel',
        'en-UK_BroadbandModel',
        'en-UK_NarrowbandModel',
        'en-US_BroadbandModel', (the default)
        'en-US_NarrowbandModel',
        'es-ES_BroadbandModel',
        'es-ES_NarrowbandModel',
        'fr-FR_BroadbandModel',
        'ja-JP_BroadbandModel',
        'ja-JP_NarrowbandModel',
        'pt-BR_BroadbandModel',
        'pt-BR_NarrowbandModel',
        'zh-CN_BroadbandModel',
        'zh-CN_NarrowbandModel'
    }
    Valid only if mode is `ibm`.
    The identifier of the model to be used for the recognition.
    Default is 'en-US_BroadbandModel'.
word_confidence : bool
    Valid only if mode is `ibm`.
    Indicates whether a confidence measure in the range of 0 to 1 is
    returned for each word. The default is True. (It's False in the
    original.)
word_alternatives_threshold : numeric
    Valid only if mode is `ibm`.
    A confidence value that is the lower bound for identifying a
    hypothesis as a possible word alternative (also known as "Confusion
    Networks"). An alternative word is considered if its confidence is
    greater than or equal to the threshold. Specify a probability
    between 0 and 1 inclusive. Default is `0.9`.
profanity_filter_for_US_results : bool
    Valid only if mode is `ibm`.
    Indicates whether profanity filtering is performed on the
    transcript. If true, the service filters profanity from all output
    by replacing inappropriate words with a series of asterisks. If
    false, the service returns results with no censoring. Applies to US
    English transcription only. Default is `False`.

Raises
------
OSError
    Valid only if mode is `cmu`.
    If the output of the pocketsphinx command results in an error.

References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
5.77765
4.724151
1.223003
unified_timestamps = _PrettyDefaultDict(list)
staged_files = self._list_audio_files(sub_dir="staging")
for timestamp_basename in self.__timestamps_unregulated:
    if len(self.__timestamps_unregulated[timestamp_basename]) > 1:
        # File has been split.
        timestamp_name = ''.join(timestamp_basename.split('.')[:-1])
        staged_splitted_files_of_timestamp = list(
            filter(lambda staged_file: (
                timestamp_name == staged_file[:-3] and
                all([(x in set(map(str, range(10))))
                     for x in staged_file[-3:]])), staged_files))
        if len(staged_splitted_files_of_timestamp) == 0:
            self.__errors[(time(), timestamp_basename)] = {
                "reason": "Missing staged file",
                "current_staged_files": staged_files}
            continue
        staged_splitted_files_of_timestamp.sort()
        unified_timestamp = list()
        for staging_digits, splitted_file in enumerate(
                self.__timestamps_unregulated[timestamp_basename]):
            prev_splits_sec = 0
            if int(staging_digits) != 0:
                prev_splits_sec = self._get_audio_duration_seconds(
                    "{}/staging/{}{:03d}".format(
                        self.src_dir, timestamp_name, staging_digits - 1))
            for word_block in splitted_file:
                unified_timestamp.append(
                    _WordBlock(
                        word=word_block.word,
                        start=round(word_block.start + prev_splits_sec, 2),
                        end=round(word_block.end + prev_splits_sec, 2)))
        unified_timestamps[str(timestamp_basename)] += unified_timestamp
    else:
        unified_timestamps[
            timestamp_basename] += self.__timestamps_unregulated[
                timestamp_basename][0]
self.__timestamps.update(unified_timestamps)
self.__timestamps_unregulated = _PrettyDefaultDict(list)
def _timestamp_regulator(self)
Makes a dictionary whose keys are audio file basenames and whose values
are a list of word blocks from unregulated timestamps, and updates the
main timestamp attribute. When done, purges the unregulated ones.

In case the audio file was large enough to be split, it adds seconds to
correct the timing, and in case the timestamp was manually loaded, it
leaves it alone.

Note that the difference between self.__timestamps and
self.__timestamps_unregulated is that in the regulated version, right
after the word, a list of word blocks must appear. However, in the
unregulated version, after a word, a list of individual splits
containing word blocks would appear!
3.694979
3.234724
1.142286
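A simplified standalone sketch of the offset correction: word blocks coming from a later split are shifted by the duration of the audio that precedes them. The word tuples and split durations below are made up, and plain tuples stand in for the _WordBlock named tuple.

split_timestamps = [
    [("hello", 0.0, 0.4), ("world", 0.5, 1.0)],   # split 000
    [("again", 0.1, 0.6)],                        # split 001
]
split_durations = [300.0, 180.5]                  # seconds per staged split

unified, offset = [], 0.0
for split_index, blocks in enumerate(split_timestamps):
    for word, start, end in blocks:
        unified.append((word, round(start + offset, 2), round(end + offset, 2)))
    offset += split_durations[split_index]
print(unified)
# [('hello', 0.0, 0.4), ('world', 0.5, 1.0), ('again', 300.1, 300.6)]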