| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q10100
|
ClassTracker._track_modify
|
train
|
def _track_modify(self, cls, name, detail, keep, trace):
"""
Modify settings of a tracked class
"""
self._observers[cls].modify(name, detail, keep, trace)
|
python
|
{
"resource": ""
}
|
q10101
|
ClassTracker._restore_constructor
|
train
|
def _restore_constructor(self, cls):
"""
Restore the original constructor, lose track of class.
"""
cls.__init__ = self._observers[cls].init
del self._observers[cls]
|
python
|
{
"resource": ""
}
|
q10102
|
ClassTracker.track_change
|
train
|
def track_change(self, instance, resolution_level=0):
"""
Change tracking options for the already tracked object 'instance'.
If instance is not tracked, a KeyError will be raised.
"""
tobj = self.objects[id(instance)]
tobj.set_resolution_level(resolution_level)
|
python
|
{
"resource": ""
}
|
q10103
|
ClassTracker.track_object
|
train
|
def track_object(self, instance, name=None, resolution_level=0, keep=False, trace=False):
"""
Track object 'instance' and sample size and lifetime information.
Not all objects can be tracked; trackable objects are class instances and
other objects that can be weakly referenced. When an object cannot be
tracked, a `TypeError` is raised.
:param resolution_level: The recursion depth up to which referents are
sized individually. Resolution level 0 (default) treats the object
as an opaque entity, 1 sizes all direct referents individually, 2
also sizes the referents of the referents and so forth.
:param keep: Prevent the object's deletion by keeping a (strong)
reference to the object.
"""
# Check if object is already tracked. This happens if track_object is
# called multiple times for the same object or if an object inherits
# from multiple tracked classes. In the latter case, the most
# specialized class wins. To detect id recycling, the weak reference
# is checked. If it is 'None' a tracked object is dead and another one
# takes the same 'id'.
if id(instance) in self.objects and \
self.objects[id(instance)].ref() is not None:
return
tobj = TrackedObject(instance, resolution_level=resolution_level, trace=trace)
if name is None:
name = instance.__class__.__name__
if not name in self.index:
self.index[name] = []
self.index[name].append(tobj)
self.objects[id(instance)] = tobj
if keep:
self._keepalive.append(instance)
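
# Usage sketch, assuming this method belongs to pympler.classtracker.ClassTracker;
# the Document class below is hypothetical and only illustrates the parameters.
from pympler.classtracker import ClassTracker

class Document:
    def __init__(self):
        self.pages = []

tracker = ClassTracker()
doc = Document()
# Size direct referents individually and keep a strong reference to `doc` alive:
tracker.track_object(doc, name='Document', resolution_level=1, keep=True)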
|
python
|
{
"resource": ""
}
|
q10104
|
ClassTracker.detach_all_classes
|
train
|
def detach_all_classes(self):
"""
Detach from all tracked classes.
"""
classes = list(self._observers.keys())
for cls in classes:
self.detach_class(cls)
|
python
|
{
"resource": ""
}
|
q10105
|
ClassTracker.detach_all
|
train
|
def detach_all(self):
"""
Detach from all tracked classes and objects.
Restore the original constructors and cleanse the tracking lists.
"""
self.detach_all_classes()
self.objects.clear()
self.index.clear()
self._keepalive[:] = []
|
python
|
{
"resource": ""
}
|
q10106
|
ClassTracker.start_periodic_snapshots
|
train
|
def start_periodic_snapshots(self, interval=1.0):
"""
        Start a thread which takes snapshots periodically. The `interval` specifies
        the time in seconds the thread waits between taking snapshots. The thread is
        started as a daemon so it does not prevent the program from exiting. If periodic snapshots are
already active, the interval is updated.
"""
if not self._periodic_thread:
self._periodic_thread = PeriodicThread(self, interval, name='BackgroundMonitor')
self._periodic_thread.setDaemon(True)
self._periodic_thread.start()
else:
self._periodic_thread.interval = interval
|
python
|
{
"resource": ""
}
|
q10107
|
ClassTracker.stop_periodic_snapshots
|
train
|
def stop_periodic_snapshots(self):
"""
Post a stop signal to the thread that takes the periodic snapshots. The
function waits for the thread to terminate which can take some time
depending on the configured interval.
"""
if self._periodic_thread and self._periodic_thread.isAlive():
self._periodic_thread.stop = True
self._periodic_thread.join()
self._periodic_thread = None
|
python
|
{
"resource": ""
}
|
q10108
|
ClassTracker.create_snapshot
|
train
|
def create_snapshot(self, description='', compute_total=False):
"""
        Collect current per-instance statistics and save the total amount of
        memory associated with the Python process.
If `compute_total` is `True`, the total consumption of all objects
known to *asizeof* is computed. The latter might be very slow if many
objects are mapped into memory at the time the snapshot is taken.
Therefore, `compute_total` is set to `False` by default.
The overhead of the `ClassTracker` structure is also computed.
Snapshots can be taken asynchronously. The function is protected with a
lock to prevent race conditions.
"""
try:
# TODO: It is not clear what happens when memory is allocated or
# released while this function is executed but it will likely lead
# to inconsistencies. Either pause all other threads or don't size
# individual objects in asynchronous mode.
self.snapshot_lock.acquire()
timestamp = _get_time()
sizer = asizeof.Asizer()
objs = [tobj.ref() for tobj in list(self.objects.values())]
sizer.exclude_refs(*objs)
# The objects need to be sized in a deterministic order. Sort the
# objects by its creation date which should at least work for non-parallel
# execution. The "proper" fix would be to handle shared data separately.
tracked_objects = list(self.objects.values())
tracked_objects.sort(key=lambda x: x.birth)
for tobj in tracked_objects:
tobj.track_size(timestamp, sizer)
snapshot = Snapshot()
snapshot.timestamp = timestamp
snapshot.tracked_total = sizer.total
if compute_total:
snapshot.asizeof_total = asizeof.asizeof(all=True, code=True)
snapshot.system_total = pympler.process.ProcessMemoryInfo()
snapshot.desc = str(description)
# Compute overhead of all structures, use sizer to exclude tracked objects(!)
snapshot.overhead = 0
if snapshot.tracked_total:
snapshot.overhead = sizer.asizeof(self)
if snapshot.asizeof_total:
snapshot.asizeof_total -= snapshot.overhead
self.snapshots.append(snapshot)
finally:
self.snapshot_lock.release()
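
# Usage sketch of a snapshot workflow, assuming the surrounding class is
# pympler.classtracker.ClassTracker and that it exposes the `stats` helper of
# current pympler releases; `Leaf` is a hypothetical class.
from pympler.classtracker import ClassTracker

class Leaf:
    pass

tracker = ClassTracker()
tracker.track_class(Leaf)
leaves = [Leaf() for _ in range(1000)]
tracker.create_snapshot('after allocation')            # sizes tracked objects only
tracker.create_snapshot('full', compute_total=True)    # slower: sizes everything asizeof can reach
tracker.stats.print_summary()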
|
python
|
{
"resource": ""
}
|
q10109
|
is_required
|
train
|
def is_required(action):
    '''Return True for _actions which are positional or which have the `required` flag set.'''
return not action.option_strings and not isinstance(action, _SubParsersAction) or action.required == True
|
python
|
{
"resource": ""
}
|
q10110
|
compute_ecc_hash
|
train
|
def compute_ecc_hash(ecc_manager, hasher, buf, max_block_size, rate, message_size=None, as_string=False):
    '''Split a string into blocks of at most max_block_size, compute the hash and ecc for each block, and then return a list with both for easy processing.'''
result = []
# If required parameters were not provided, we compute them
if not message_size:
ecc_params = compute_ecc_params(max_block_size, rate, hasher)
message_size = ecc_params["message_size"]
# Split the buffer string in blocks (necessary for Reed-Solomon encoding because it's limited to 255 characters max)
for i in xrange(0, len(buf), message_size):
# Compute the message block
mes = buf[i:i+message_size]
# Compute the ecc
ecc = ecc_manager.encode(mes)
# Compute the hash
hash = hasher.hash(mes)
#crc = zlib.crc32(mes) # DEPRECATED: CRC is not resilient enough
#print("mes %i (%i) - ecc %i (%i) - hash %i (%i)" % (len(mes), message_size, len(ecc), ecc_params["ecc_size"], len(hash), ecc_params["hash_size"])) # DEBUGLINE
# Return the result (either in string for easy writing into a file, or in a list for easy post-processing)
if as_string:
result.append("%s%s" % (str(hash),str(ecc)))
else:
result.append([hash, ecc])
return result
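
# Illustrative sketch of the same per-block hash+ecc pattern using stand-in
# libraries (hashlib and reedsolo) instead of the project's ecc_manager and
# hasher objects, whose interfaces are assumed here and may differ.
import hashlib
from reedsolo import RSCodec

message_size = 223                    # assumed payload size per block
rs = RSCodec(32)                      # assumed: 32 ecc symbols per 255-byte block
buf = b'some long buffer to protect' * 50

blocks = []
for i in range(0, len(buf), message_size):
    mes = buf[i:i + message_size]
    ecc = bytes(rs.encode(mes))[len(mes):]   # keep only the parity symbols
    digest = hashlib.md5(mes).digest()
    blocks.append((digest, ecc))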
|
python
|
{
"resource": ""
}
|
q10111
|
ReferenceGraph._eliminate_leafs
|
train
|
def _eliminate_leafs(self, graph):
"""
        Eliminate leaf objects, i.e. objects that do not reference any other
        objects in the list `graph`. Return the list of objects with the
        identified leafs removed.
"""
result = []
idset = set([id(x) for x in graph])
for n in graph:
refset = set([id(x) for x in get_referents(n)])
if refset.intersection(idset):
result.append(n)
return result
|
python
|
{
"resource": ""
}
|
q10112
|
ReferenceGraph._reduce_to_cycles
|
train
|
def _reduce_to_cycles(self):
"""
Iteratively eliminate leafs to reduce the set of objects to only those
that build cycles. Return the number of objects involved in reference
cycles. If there are no cycles, `self.objects` will be an empty list and
this method returns 0.
"""
cycles = self.objects[:]
cnt = 0
while cnt != len(cycles):
cnt = len(cycles)
cycles = self._eliminate_leafs(cycles)
self.objects = cycles
return len(self.objects)
|
python
|
{
"resource": ""
}
|
q10113
|
ReferenceGraph.reduce_to_cycles
|
train
|
def reduce_to_cycles(self):
"""
Iteratively eliminate leafs to reduce the set of objects to only those
that build cycles. Return the reduced graph. If there are no cycles,
None is returned.
"""
if not self._reduced:
reduced = copy(self)
reduced.objects = self.objects[:]
reduced.metadata = []
reduced.edges = []
self.num_in_cycles = reduced._reduce_to_cycles()
reduced.num_in_cycles = self.num_in_cycles
if self.num_in_cycles:
reduced._get_edges()
reduced._annotate_objects()
for meta in reduced.metadata:
meta.cycle = True
else:
reduced = None
self._reduced = reduced
return self._reduced
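
# Usage sketch: detecting a simple reference cycle. The import path matches the
# doctest in `split` below; the objects are hypothetical.
from pympler.refgraph import ReferenceGraph

a = []
b = [a]
a.append(b)                 # a and b now reference each other
c = ['no cycle here']

rg = ReferenceGraph([a, b, c])
reduced = rg.reduce_to_cycles()
if reduced is not None:
    print(reduced.num_in_cycles)   # 2 -- only `a` and `b` survive the leaf elimination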
|
python
|
{
"resource": ""
}
|
q10114
|
ReferenceGraph._filter_group
|
train
|
def _filter_group(self, group):
"""
Eliminate all objects but those which belong to `group`.
``self.objects``, ``self.metadata`` and ``self.edges`` are modified.
Returns `True` if the group is non-empty. Otherwise returns `False`.
"""
self.metadata = [x for x in self.metadata if x.group == group]
group_set = set([x.id for x in self.metadata])
self.objects = [obj for obj in self.objects if id(obj) in group_set]
self.count = len(self.metadata)
if self.metadata == []:
return False
self.edges = [e for e in self.edges if e.group == group]
del self._max_group
return True
|
python
|
{
"resource": ""
}
|
q10115
|
ReferenceGraph.split
|
train
|
def split(self):
"""
Split the graph into sub-graphs. Only connected objects belong to the
same graph. `split` yields copies of the Graph object. Shallow copies
are used that only replicate the meta-information, but share the same
object list ``self.objects``.
>>> from pympler.refgraph import ReferenceGraph
>>> a = 42
>>> b = 'spam'
>>> c = {a: b}
>>> t = (1,2,3)
>>> rg = ReferenceGraph([a,b,c,t])
>>> for subgraph in rg.split():
... print subgraph.index
0
1
"""
self._annotate_groups()
index = 0
for group in range(self._max_group):
subgraph = copy(self)
subgraph.metadata = self.metadata[:]
subgraph.edges = self.edges.copy()
if subgraph._filter_group(group):
subgraph.total_size = sum([x.size for x in subgraph.metadata])
subgraph.index = index
index += 1
yield subgraph
|
python
|
{
"resource": ""
}
|
q10116
|
ReferenceGraph.split_and_sort
|
train
|
def split_and_sort(self):
"""
        Split the graph into subgraphs and return a list of all subgraphs sorted
        by the number of nodes. The subgraph with the most nodes is returned first.
"""
graphs = list(self.split())
graphs.sort(key=lambda x: -len(x.metadata))
for index, graph in enumerate(graphs):
graph.index = index
return graphs
|
python
|
{
"resource": ""
}
|
q10117
|
ReferenceGraph._annotate_objects
|
train
|
def _annotate_objects(self):
"""
Extract meta-data describing the stored objects.
"""
self.metadata = []
sizer = Asizer()
sizes = sizer.asizesof(*self.objects)
self.total_size = sizer.total
for obj, sz in zip(self.objects, sizes):
md = _MetaObject()
md.size = sz
md.id = id(obj)
try:
md.type = obj.__class__.__name__
except (AttributeError, ReferenceError): # pragma: no cover
md.type = type(obj).__name__
md.str = safe_repr(obj, clip=128)
self.metadata.append(md)
|
python
|
{
"resource": ""
}
|
q10118
|
ReferenceGraph._get_graphviz_data
|
train
|
def _get_graphviz_data(self):
"""
Emit a graph representing the connections between the objects described
within the metadata list. The text representation can be transformed to
a graph with graphviz. Returns a string.
"""
s = []
header = '// Process this file with graphviz\n'
s.append( header)
s.append('digraph G {\n')
s.append(' node [shape=box];\n')
for md in self.metadata:
label = trunc(md.str, 48).replace('"', "'")
extra = ''
if md.type == 'instancemethod':
extra = ', color=red'
elif md.type == 'frame':
extra = ', color=orange'
s.append(' "X%s" [ label = "%s\\n%s" %s ];\n' % \
(hex(md.id)[1:], label, md.type, extra))
for e in self.edges:
extra = ''
if e.label == '__dict__':
extra = ',weight=100'
s.append(' X%s -> X%s [label="%s"%s];\n' % \
(hex(e.src)[1:], hex(e.dst)[1:], e.label, extra))
s.append('}\n')
return "".join(s)
|
python
|
{
"resource": ""
}
|
q10119
|
ReferenceGraph.write_graph
|
train
|
def write_graph(self, filename):
"""
Write raw graph data which can be post-processed using graphviz.
"""
f = open(filename, 'w')
f.write(self._get_graphviz_data())
f.close()
|
python
|
{
"resource": ""
}
|
q10120
|
Profiler.root_frame
|
train
|
def root_frame(self):
"""
Returns the parsed results in the form of a tree of Frame objects
"""
if not hasattr(self, '_root_frame'):
self._root_frame = Frame()
# define a recursive function that builds the hierarchy of frames given the
# stack of frame identifiers
def frame_for_stack(stack):
if len(stack) == 0:
return self._root_frame
parent = frame_for_stack(stack[:-1])
frame_name = stack[-1]
if not frame_name in parent.children_dict:
parent.add_child(Frame(frame_name, parent))
return parent.children_dict[frame_name]
for stack, self_time in self.stack_self_time.items():
frame_for_stack(stack).self_time = self_time
return self._root_frame
|
python
|
{
"resource": ""
}
|
q10121
|
is_module_stdlib
|
train
|
def is_module_stdlib(file_name):
"""Returns True if the file_name is in the lib directory."""
# TODO: Move these calls away from this function so it doesn't have to run
# every time.
lib_path = sysconfig.get_python_lib()
path = os.path.split(lib_path)
if path[1] == 'site-packages':
lib_path = path[0]
return file_name.lower().startswith(lib_path.lower())
|
python
|
{
"resource": ""
}
|
q10122
|
tracer
|
train
|
def tracer(frame, event, arg):
"""This is an internal function that is called every time a call is made
during a trace. It keeps track of relationships between calls.
"""
global func_count_max
global func_count
global trace_filter
global time_filter
global call_stack
global func_time
global func_time_max
if event == 'call':
keep = True
code = frame.f_code
# Stores all the parts of a human readable name of the current call.
full_name_list = []
# Work out the module name
module = inspect.getmodule(code)
if module:
module_name = module.__name__
module_path = module.__file__
if not settings['include_stdlib'] \
and is_module_stdlib(module_path):
keep = False
if module_name == '__main__':
module_name = ''
else:
module_name = ''
if module_name:
full_name_list.append(module_name)
# Work out the class name.
try:
class_name = frame.f_locals['self'].__class__.__name__
full_name_list.append(class_name)
except (KeyError, AttributeError):
class_name = ''
# Work out the current function or method
func_name = code.co_name
if func_name == '?':
func_name = '__main__'
full_name_list.append(func_name)
# Create a readable representation of the current call
full_name = '.'.join(full_name_list)
# Load the trace filter, if any. 'keep' determines if we should ignore
# this call
if keep and trace_filter:
keep = trace_filter(call_stack, module_name, class_name,
func_name, full_name)
# Store the call information
if keep:
if call_stack:
fr = call_stack[-1]
else:
fr = None
if fr not in call_dict:
call_dict[fr] = {}
if full_name not in call_dict[fr]:
call_dict[fr][full_name] = 0
call_dict[fr][full_name] += 1
if full_name not in func_count:
func_count[full_name] = 0
func_count[full_name] += 1
if func_count[full_name] > func_count_max:
func_count_max = func_count[full_name]
call_stack.append(full_name)
call_stack_timer.append(time.time())
else:
call_stack.append('')
call_stack_timer.append(None)
if event == 'return':
if call_stack:
full_name = call_stack.pop(-1)
if call_stack_timer:
t = call_stack_timer.pop(-1)
else:
t = None
if t and time_filter(stack=call_stack, full_name=full_name):
if full_name not in func_time:
func_time[full_name] = 0
call_time = (time.time() - t)
func_time[full_name] += call_time
if func_time[full_name] > func_time_max:
func_time_max = func_time[full_name]
return tracer
|
python
|
{
"resource": ""
}
|
q10123
|
get_dot
|
train
|
def get_dot(stop=True):
"""Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop.
"""
defaults = []
nodes = []
edges = []
# define default attributes
for comp, comp_attr in graph_attributes.items():
attr = ', '.join( '%s = "%s"' % (attr, val)
for attr, val in comp_attr.items() )
defaults.append( '\t%(comp)s [ %(attr)s ];\n' % locals() )
# define nodes
for func, hits in func_count.items():
calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
col = settings['node_colour'](calls_frac, total_time_frac)
attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
node_str = '"%s" [%s];' % (func, ', '.join(attribs))
nodes.append( node_str % locals() )
# define edges
for fr_key, fr_val in call_dict.items():
if not fr_key: continue
for to_key, to_val in fr_val.items():
            calls_frac, total_time_frac, total_time = \
                _frac_calculation(to_key, to_val)
col = settings['edge_colour'](calls_frac, total_time_frac)
edge = '[ color = "%s", label="%s" ]' % (col, to_val)
edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))
defaults = '\n\t'.join( defaults )
nodes = '\n\t'.join( nodes )
edges = '\n\t'.join( edges )
dot_fmt = ("digraph G {\n"
" %(defaults)s\n\n"
" %(nodes)s\n\n"
" %(edges)s\n}\n"
)
return dot_fmt % locals()
|
python
|
{
"resource": ""
}
|
q10124
|
get_gdf
|
train
|
def get_gdf(stop=True):
"""Returns a string containing a GDF file. Setting stop to True will cause
the trace to stop.
"""
ret = ['nodedef>name VARCHAR, label VARCHAR, hits INTEGER, ' + \
'calls_frac DOUBLE, total_time_frac DOUBLE, ' + \
'total_time DOUBLE, color VARCHAR, width DOUBLE']
for func, hits in func_count.items():
calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
col = settings['node_colour'](calls_frac, total_time_frac)
color = ','.join([str(round(float(c) * 255)) for c in col.split()])
ret.append('%s,%s,%s,%s,%s,%s,\'%s\',%s' % (func, func, hits, \
calls_frac, total_time_frac, total_time, color, \
math.log(hits * 10)))
ret.append('edgedef>node1 VARCHAR, node2 VARCHAR, color VARCHAR')
for fr_key, fr_val in call_dict.items():
if fr_key == '':
continue
for to_key, to_val in fr_val.items():
calls_frac, total_time_frac, total_time = \
_frac_calculation(to_key, to_val)
col = settings['edge_colour'](calls_frac, total_time_frac)
color = ','.join([str(round(float(c) * 255)) for c in col.split()])
ret.append('%s,%s,\'%s\'' % (fr_key, to_key, color))
ret = '\n'.join(ret)
return ret
|
python
|
{
"resource": ""
}
|
q10125
|
make_dot_graph
|
train
|
def make_dot_graph(filename, format='png', tool='dot', stop=True):
"""Creates a graph using a Graphviz tool that supports the dot language. It
will output into a file specified by filename with the format specified.
Setting stop to True will stop the current trace.
"""
if stop:
stop_trace()
dot_data = get_dot()
# normalize filename
    regex_user_expand = re.compile(r'\A~')
if regex_user_expand.match(filename):
filename = os.path.expanduser(filename)
else:
filename = os.path.expandvars(filename) # expand, just in case
if format == 'dot':
f = open(filename, 'w')
f.write(dot_data)
f.close()
else:
# create a temporary file to be used for the dot data
fd, tempname = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(dot_data)
cmd = '%(tool)s -T%(format)s -o%(filename)s %(tempname)s' % locals()
try:
ret = os.system(cmd)
if ret:
raise PyCallGraphException( \
'The command "%(cmd)s" failed with error ' \
'code %(ret)i.' % locals())
finally:
os.unlink(tempname)
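
# Usage sketch, assuming these helpers form the module-level API of the classic
# pycallgraph 0.x module (start_trace/stop_trace are referenced above);
# `work` is a hypothetical function to be traced.
import pycallgraph

def work():
    return sum(range(1000))

pycallgraph.start_trace()
work()
pycallgraph.make_dot_graph('callgraph.png', format='png', tool='dot')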
|
python
|
{
"resource": ""
}
|
q10126
|
make_gdf_graph
|
train
|
def make_gdf_graph(filename, stop=True):
"""Create a graph in simple GDF format, suitable for feeding into Gephi,
or some other graph manipulation and display tool. Setting stop to True
will stop the current trace.
"""
if stop:
stop_trace()
    with open(filename, 'w') as f:
        f.write(get_gdf())
|
python
|
{
"resource": ""
}
|
q10127
|
simple_memoize
|
train
|
def simple_memoize(callable_object):
"""Simple memoization for functions without keyword arguments.
    This is useful for mapping code objects to modules in this context.
    inspect.getmodule() requires a number of system calls, which may slow down
    the tracing considerably. Caching the mapping from code objects to modules
    speeds this up (there is *one* code object for each function, regardless of
    how many simultaneous activation records there are).
In this context we can ignore keyword arguments, but a generic memoizer
ought to take care of that as well.
"""
cache = dict()
def wrapper(*rest):
if rest not in cache:
cache[rest] = callable_object(*rest)
return cache[rest]
return wrapper
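
# Usage sketch: memoizing the inspect.getmodule lookup mentioned in the
# docstring; `demo` is a hypothetical function used only to obtain a code object.
import inspect

cached_getmodule = simple_memoize(inspect.getmodule)

def demo():
    return 42

# The second call with the same code object is served from the cache instead of
# repeating the filesystem lookups inside inspect.getmodule.
first = cached_getmodule(demo.__code__)
second = cached_getmodule(demo.__code__)
print(first is second)   # True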
|
python
|
{
"resource": ""
}
|
q10128
|
macshim
|
train
|
def macshim():
"""Shim to run 32-bit on 64-bit mac as a sub-process"""
import subprocess, sys
subprocess.call([
sys.argv[0] + '32'
]+sys.argv[1:],
env={"VERSIONER_PYTHON_PREFER_32_BIT":"yes"}
)
|
python
|
{
"resource": ""
}
|
q10129
|
ECCMan.check
|
train
|
def check(self, message, ecc, k=None):
        '''Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes, to detect whether the message was tampered with, or after decoding to check that the message was fully recovered.'''
if not k: k = self.k
message, _ = self.pad(message, k=k)
ecc, _ = self.rpad(ecc, k=k)
if self.algo == 1 or self.algo == 2:
return self.ecc_manager.check_fast(message + ecc, k=k)
elif self.algo == 3 or self.algo == 4:
return reedsolo.rs_check(bytearray(message + ecc), self.n-k, fcr=self.fcr, generator=self.gen_nb)
|
python
|
{
"resource": ""
}
|
q10130
|
ECCMan.description
|
train
|
def description(self):
        '''Provide a description of the configured ECC algorithm, useful for printing into the ecc file'''
if 0 < self.algo <= 3:
return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
elif self.algo == 4:
return "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) under US FAA ADSB UAT RS FEC standard with generator=%s, prime poly=%s and first consecutive root=%s." % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
else:
return "No description for this ECC algorithm."
|
python
|
{
"resource": ""
}
|
q10131
|
profile
|
train
|
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40,
profiler=('cProfile', 'profile', 'hotshot')):
"""Mark `fn` for profiling.
If `skip` is > 0, first `skip` calls to `fn` will not be profiled.
If `immediate` is False, profiling results will be printed to
sys.stdout on program termination. Otherwise results will be printed
after each call.
If `dirs` is False only the name of the file will be printed.
Otherwise the full path is used.
`sort` can be a list of sort keys (defaulting to ['cumulative',
'time', 'calls']). The following ones are recognized::
'calls' -- call count
'cumulative' -- cumulative time
'file' -- file name
'line' -- line number
'module' -- file name
'name' -- function name
'nfl' -- name/file/line
'pcalls' -- call count
'stdname' -- standard name
'time' -- internal time
`entries` limits the output to the first N entries.
`profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference. The default is ('cProfile',
    'profile', 'hotshot').
    If `filename` is specified, the profile stats will be stored in the
    named file. You can load them with pstats.Stats(filename).
Usage::
def fn(...):
...
fn = profile(fn, skip=1)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@profile(skip=3)
def fn(...):
...
or just ::
@profile
def fn(...):
...
"""
if fn is None: # @profile() syntax -- we are a decorator maker
def decorator(fn):
return profile(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries,
profiler=profiler)
return decorator
# @profile syntax -- we are a decorator.
if isinstance(profiler, str):
profiler = [profiler]
for p in profiler:
if p in AVAILABLE_PROFILERS:
profiler_class = AVAILABLE_PROFILERS[p]
break
else:
raise ValueError('only these profilers are available: %s'
% ', '.join(AVAILABLE_PROFILERS))
fp = profiler_class(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries)
# fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...)
# or HotShotFuncProfile
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
|
python
|
{
"resource": ""
}
|
q10132
|
timecall
|
train
|
def timecall(fn=None, immediate=True, timer=time.time):
"""Wrap `fn` and print its execution time.
Example::
@timecall
def somefunc(x, y):
time.sleep(x * y)
somefunc(2, 3)
will print the time taken by somefunc on every call. If you want just
a summary at program termination, use
@timecall(immediate=False)
You can also choose a timing method other than the default ``time.time()``,
e.g.:
@timecall(timer=time.clock)
"""
if fn is None: # @timecall() syntax -- we are a decorator maker
def decorator(fn):
return timecall(fn, immediate=immediate, timer=timer)
return decorator
# @timecall syntax -- we are a decorator.
fp = FuncTimer(fn, immediate=immediate, timer=timer)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
|
python
|
{
"resource": ""
}
|
q10133
|
FuncProfile.print_stats
|
train
|
def print_stats(self):
"""Print profile information to sys.stdout."""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** PROFILER RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
if self.skipped:
skipped = "(%d calls not profiled)" % self.skipped
else:
skipped = ""
print("function called %d times%s" % (self.ncalls, skipped))
print("")
stats = self.stats
if self.filename:
stats.dump_stats(self.filename)
if not self.dirs:
stats.strip_dirs()
stats.sort_stats(*self.sort)
stats.print_stats(self.entries)
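
# Usage sketch: stats dumped via the `filename` option can be read back with the
# standard pstats module; the path below is hypothetical.
import pstats

stats = pstats.Stats('myfunc.profile')
stats.strip_dirs().sort_stats('cumulative').print_stats(10)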
|
python
|
{
"resource": ""
}
|
q10134
|
FuncProfile.reset_stats
|
train
|
def reset_stats(self):
"""Reset accumulated profiler statistics."""
# Note: not using self.Profile, since pstats.Stats() fails then
self.stats = pstats.Stats(Profile())
self.ncalls = 0
self.skipped = 0
|
python
|
{
"resource": ""
}
|
q10135
|
TraceFuncCoverage.atexit
|
train
|
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
print("function called %d times" % self.ncalls)
print("")
fs = FuncSource(self.fn)
for (filename, lineno), count in self.tracer.counts.items():
if filename != fs.filename:
continue
fs.mark(lineno, count)
print(fs)
never_executed = fs.count_never_executed()
if never_executed:
print("%d lines were not executed." % never_executed)
|
python
|
{
"resource": ""
}
|
q10136
|
FuncSource.find_source_lines
|
train
|
def find_source_lines(self):
"""Mark all executable source lines in fn as executed 0 times."""
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
self.firstcodelineno = sys.maxint
for lineno in lines:
self.firstcodelineno = min(self.firstcodelineno, lineno)
self.sourcelines.setdefault(lineno, 0)
if self.firstcodelineno == sys.maxint:
self.firstcodelineno = self.firstlineno
|
python
|
{
"resource": ""
}
|
q10137
|
FuncSource.mark
|
train
|
def mark(self, lineno, count=1):
"""Mark a given source line as executed count times.
Multiple calls to mark for the same lineno add up.
"""
self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
|
python
|
{
"resource": ""
}
|
q10138
|
FuncSource.count_never_executed
|
train
|
def count_never_executed(self):
"""Count statements that were never executed."""
lineno = self.firstlineno
counter = 0
for line in self.source:
if self.sourcelines.get(lineno) == 0:
if not self.blank_rx.match(line):
counter += 1
lineno += 1
return counter
|
python
|
{
"resource": ""
}
|
q10139
|
SimpleAudioIndexer._split_audio_by_duration
|
train
|
def _split_audio_by_duration(self, audio_abs_path,
results_abs_path, duration_seconds):
"""
Calculates the length of each segment and passes it to
self._audio_segment_extractor
Parameters
----------
audio_abs_path : str
results_abs_path : str
            A placeholder for digits needs to be added prior to the format
            declaration, i.e. name%03.wav. Here, we've added `*` at the staging
            step, which we'll replace.
duration_seconds : int
"""
total_seconds = self._get_audio_duration_seconds(audio_abs_path)
current_segment = 0
while current_segment <= total_seconds // duration_seconds + 1:
if current_segment + duration_seconds > total_seconds:
ending_second = total_seconds
else:
ending_second = current_segment + duration_seconds
self._audio_segment_extractor(
audio_abs_path,
results_abs_path.replace("*", "{:03d}".format(
current_segment)),
starting_second=current_segment, duration=(ending_second -
current_segment))
current_segment += 1
|
python
|
{
"resource": ""
}
|
q10140
|
SimpleAudioIndexer._filtering_step
|
train
|
def _filtering_step(self, basename):
"""
Moves the audio file if the format is `wav` to `filtered` directory.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
"""
name = ''.join(basename.split('.')[:-1])
# May cause problems if wav is not less than 9 channels.
if basename.split('.')[-1] == "wav":
if self.get_verbosity():
print("Found wave! Copying to {}/filtered/{}".format(
self.src_dir, basename))
subprocess.Popen(["cp", "{}/{}.wav".format(self.src_dir, name),
"{}/filtered/{}.wav".format(self.src_dir, name)],
universal_newlines=True).communicate()
|
python
|
{
"resource": ""
}
|
q10141
|
SimpleAudioIndexer._prepare_audio
|
train
|
def _prepare_audio(self, basename, replace_already_indexed=False):
"""
Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files.
"""
if basename is not None:
if basename in self.get_timestamps():
if self.get_verbosity():
print("File specified was already indexed. Reindexing...")
del self.__timestamps[basename]
self._filtering_step(basename)
self._staging_step(basename)
else:
for audio_basename in self._list_audio_files():
if audio_basename in self.__timestamps:
if replace_already_indexed:
if self.get_verbosity():
print("Already indexed {}. Reindexing...".format(
audio_basename))
del self.__timestamps[audio_basename]
else:
if self.get_verbosity():
print("Already indexed {}. Skipping...".format(
audio_basename))
continue
self._filtering_step(audio_basename)
self._staging_step(audio_basename)
|
python
|
{
"resource": ""
}
|
q10142
|
SimpleAudioIndexer._index_audio_cmu
|
train
|
def _index_audio_cmu(self, basename=None, replace_already_indexed=False):
"""
Indexes audio with pocketsphinx. Beware that the output would not be
sufficiently accurate. Use this only if you don't want to upload your
files to IBM.
Parameters
-----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
E.g. `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
Raises
------
OSError
If the output of pocketsphinx command results in an error.
"""
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
            pocketsphinx_command = ' '.join([
"pocketsphinx_continuous", "-infile",
str("{}/staging/{}".format(
self.src_dir, staging_audio_basename)),
"-time", "yes", "-logfn", "/dev/null"])
try:
if self.get_verbosity():
print("Now indexing {}".format(staging_audio_basename))
output = subprocess.check_output([
"pocketsphinx_continuous", "-infile",
str("{}/staging/{}".format(
self.src_dir, staging_audio_basename)),
"-time", "yes", "-logfn", "/dev/null"
], universal_newlines=True).split('\n')
str_timestamps_with_sil_conf = list(map(
lambda x: x.split(" "), filter(None, output[1:])))
                # Timestamps are put in a list of a single element, to match
                # Watson's output.
self.__timestamps_unregulated[
original_audio_name + ".wav"] = [(
self._timestamp_extractor_cmu(
staging_audio_basename,
str_timestamps_with_sil_conf))]
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
except OSError as e:
if self.get_verbosity():
print(e, "The command was: {}".format(
pocketsphinx_command))
self.__errors[(time(), staging_audio_basename)] = e
self._timestamp_regulator()
if self.get_verbosity():
print("Finished indexing procedure")
|
python
|
{
"resource": ""
}
|
q10143
|
SimpleAudioIndexer._index_audio_ibm
|
train
|
def _index_audio_ibm(self, basename=None, replace_already_indexed=False,
continuous=True, model="en-US_BroadbandModel",
word_confidence=True, word_alternatives_threshold=0.9,
profanity_filter_for_US_results=False):
"""
Implements a search-suitable interface for Watson speech API.
        Some explanations of the parameters here have been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
"""
params = {'continuous': continuous,
'model': model,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': True,
'inactivity_timeout': str(-1),
'profanity_filter': profanity_filter_for_US_results}
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
with open("{}/staging/{}".format(
self.src_dir, staging_audio_basename), "rb") as f:
if self.get_verbosity():
print("Uploading {}...".format(staging_audio_basename))
response = requests.post(
url=("https://stream.watsonplatform.net/"
"speech-to-text/api/v1/recognize"),
auth=(self.get_username_ibm(), self.get_password_ibm()),
headers={'content-type': 'audio/wav'},
data=f.read(),
params=params)
if self.get_verbosity():
print("Indexing {}...".format(staging_audio_basename))
self.__timestamps_unregulated[
original_audio_name + ".wav"].append(
self._timestamp_extractor_ibm(
staging_audio_basename, json.loads(response.text)))
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
self._timestamp_regulator()
if self.get_verbosity():
print("Indexing procedure finished")
|
python
|
{
"resource": ""
}
|
q10144
|
SimpleAudioIndexer.index_audio
|
train
|
def index_audio(self, *args, **kwargs):
"""
Calls the correct indexer function based on the mode.
        If mode is `ibm`, _index_audio_ibm is called, which is an interface
        for Watson. Note that some of the explanation of _index_audio_ibm's
        arguments is from [1]_
        If mode is `cmu`, _index_audio_cmu is called, which is an interface
        for PocketSphinx. Beware that the output would not be sufficiently
        accurate. Use this only if you don't want to upload your files to IBM.
Parameters
----------
mode : {"ibm", "cmu"}
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Valid Only if mode is `ibm`
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
Valid Only if mode is `ibm`
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Valid Only if mode is `ibm`
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
Valid Only if mode is `ibm`
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Valid Only if mode is `ibm`
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
Raises
------
OSError
Valid only if mode is `cmu`.
If the output of pocketsphinx command results in an error.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
Else if mode is `cmu`, then _index_audio_cmu would be called:
"""
with _Subdirectory_Managing_Decorator(
self.src_dir, self._needed_directories):
if self.get_mode() == "ibm":
self._index_audio_ibm(*args, **kwargs)
elif self.get_mode() == "cmu":
self._index_audio_cmu(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10145
|
SimpleAudioIndexer._timestamp_regulator
|
train
|
def _timestamp_regulator(self):
"""
Makes a dictionary whose keys are audio file basenames and whose
values are a list of word blocks from unregulated timestamps and
        updates the main timestamp attribute. When done, it purges the
        unregulated ones.
        In case the audio file was large enough to be split, it adds seconds
        to correct the timing; in case the timestamp was manually loaded, it
        leaves it alone.
Note that the difference between self.__timestamps and
self.__timestamps_unregulated is that in the regulated version,
right after the word, a list of word blocks must appear. However in the
unregulated version, after a word, a list of individual splits
containing word blocks would appear!
"""
unified_timestamps = _PrettyDefaultDict(list)
staged_files = self._list_audio_files(sub_dir="staging")
for timestamp_basename in self.__timestamps_unregulated:
if len(self.__timestamps_unregulated[timestamp_basename]) > 1:
# File has been splitted
timestamp_name = ''.join(timestamp_basename.split('.')[:-1])
staged_splitted_files_of_timestamp = list(
filter(lambda staged_file: (
timestamp_name == staged_file[:-3] and
all([(x in set(map(str, range(10))))
for x in staged_file[-3:]])), staged_files))
if len(staged_splitted_files_of_timestamp) == 0:
self.__errors[(time(), timestamp_basename)] = {
"reason": "Missing staged file",
"current_staged_files": staged_files}
continue
staged_splitted_files_of_timestamp.sort()
unified_timestamp = list()
for staging_digits, splitted_file in enumerate(
self.__timestamps_unregulated[timestamp_basename]):
prev_splits_sec = 0
if int(staging_digits) != 0:
prev_splits_sec = self._get_audio_duration_seconds(
"{}/staging/{}{:03d}".format(
self.src_dir, timestamp_name,
staging_digits - 1))
for word_block in splitted_file:
unified_timestamp.append(
_WordBlock(
word=word_block.word,
start=round(word_block.start +
prev_splits_sec, 2),
end=round(word_block.end +
prev_splits_sec, 2)))
unified_timestamps[
str(timestamp_basename)] += unified_timestamp
else:
unified_timestamps[
timestamp_basename] += self.__timestamps_unregulated[
timestamp_basename][0]
self.__timestamps.update(unified_timestamps)
self.__timestamps_unregulated = _PrettyDefaultDict(list)
|
python
|
{
"resource": ""
}
|
q10146
|
SimpleAudioIndexer.save_indexed_audio
|
train
|
def save_indexed_audio(self, indexed_audio_file_abs_path):
"""
Writes the corrected timestamps to a file. Timestamps are a python
dictionary.
Parameters
----------
indexed_audio_file_abs_path : str
"""
with open(indexed_audio_file_abs_path, "wb") as f:
pickle.dump(self.get_timestamps(), f, pickle.HIGHEST_PROTOCOL)
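
# Usage sketch: the saved file is just a pickled dict, so it can be read back
# with the standard pickle module; the path below is hypothetical.
import pickle

with open('/home/me/audio/index.pickle', 'rb') as f:
    timestamps = pickle.load(f)
print(list(timestamps.keys()))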
|
python
|
{
"resource": ""
}
|
q10147
|
SimpleAudioIndexer._partial_search_validator
|
train
|
def _partial_search_validator(self, sub, sup, anagram=False,
subsequence=False, supersequence=False):
"""
It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
        First, checks to see if all elements of `sub` are in `sup` with at least
        the same frequency, and then checks to see if every element of `sub`
        appears in `sup` in the same order (index-wise).
        If advanced control structures are specified, the containment condition
        won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
"""
def get_all_in(one, another):
for element in one:
if element in another:
yield element
def containment_check(sub, sup):
return (set(Counter(sub).keys()).issubset(
set(Counter(sup).keys())))
def containment_freq_check(sub, sup):
return (all([Counter(sub)[element] <= Counter(sup)[element]
for element in Counter(sub)]))
def extra_freq_check(sub, sup, list_of_tups):
# Would be used for matching anagrams, subsequences etc.
return (len(list_of_tups) > 0 and
all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
for tup in list_of_tups]))
# Regarding containment checking while having extra conditions,
        # there's no good way to map each anagram or subsequence etc. that was
# found to the query word, without making it more complicated than
# it already is, because a query word can be anagram/subsequence etc.
# to multiple words of the timestamps yet finding the one with the
# right index would be the problem.
# Therefore we just approximate the solution by just counting
# the elements.
if len(sub) > len(sup):
return False
for pred, func in set([(anagram, self._is_anagram_of),
(subsequence, self._is_subsequence_of),
(supersequence, self._is_supersequence_of)]):
if pred:
pred_seive = [(sub_key, sup_key)
for sub_key in set(Counter(sub).keys())
for sup_key in set(Counter(sup).keys())
if func(sub_key, sup_key)]
if not extra_freq_check(sub, sup, pred_seive):
return False
if (
not any([anagram, subsequence, supersequence]) and
(not containment_check(sub, sup) or
not containment_freq_check(sub, sup))
):
return False
for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
if x1 != x2:
return False
return True
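
# Worked illustration of the default check (no anagram/subsequence/supersequence
# flags), using hypothetical word lists:
#   sub = ["hello", "world"], sup = ["oh", "hello", "big", "world"]
#   - containment: every word of sub appears in sup at least as often  -> True
#   - order: the sup-words that occur in sub are ["hello", "world"] and the
#     sub-words that occur in sup are ["hello", "world"]; zipping them yields
#     only equal pairs                                                 -> True
#   => the partial result is accepted.
#   With sup = ["world", "oh", "hello"] the zipped pairs start with
#   ("world", "hello"), so the order check fails and the result is rejected.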
|
python
|
{
"resource": ""
}
|
q10148
|
SimpleAudioIndexer.search_all
|
train
|
def search_all(self, queries, audio_basename=None, case_sensitive=False,
subsequence=False, supersequence=False, timing_error=0.0,
anagram=False, missing_word_tolerance=0):
"""
Returns a dictionary of all results of all of the queries for all of
the audio files.
All the specified parameters work per query.
Parameters
----------
queries : [str] or str
A list of the strings that'll be searched. If type of queries is
            `str`, it'll be inserted into a list within the body of the
method.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
case_sensitive : bool
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word be detected and larger
strings that contain the given one are fine.
            If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word be detected and
smaller strings that are contained within the given one are fine.
            If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
            If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are a list whose elements are 2-tuples
whose first element is the starting second of the query and whose
values are the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
Raises
------
TypeError
if `queries` is neither a list nor a str
"""
search_gen_rest_of_kwargs = {
"audio_basename": audio_basename,
"case_sensitive": case_sensitive,
"subsequence": subsequence,
"supersequence": supersequence,
"timing_error": timing_error,
"anagram": anagram,
"missing_word_tolerance": missing_word_tolerance}
if not isinstance(queries, (list, str)):
raise TypeError("Invalid query type.")
if type(queries) is not list:
queries = [queries]
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for query in queries:
search_gen = self.search_gen(query=query,
**search_gen_rest_of_kwargs)
for search_result in search_gen:
search_results[query][
search_result["File Name"]].append(search_result["Result"])
return search_results
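
# Usage sketch; the constructor arguments below are assumptions about
# SimpleAudioIndexer's interface, and the directory/queries are hypothetical.
from SimpleAudioIndexer import SimpleAudioIndexer

indexer = SimpleAudioIndexer(src_dir='/home/me/audio', mode='cmu')
indexer.index_audio()
hits = indexer.search_all(['apple', 'some random text'],
                          timing_error=0.1,
                          missing_word_tolerance=1)
# e.g. {'apple': {'fruits.wav': [(1.1, 1.12)]}}
print(hits)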
|
python
|
{
"resource": ""
}
|
q10149
|
SimpleAudioIndexer.search_regexp
|
train
|
def search_regexp(self, pattern, audio_basename=None):
"""
First joins the words of the word_blocks of timestamps with space, per
audio_basename. Then matches `pattern` and calculates the index of the
word_block where the first and last word of the matched result appears
in. Then presents the output like `search_all` method.
Note that the leading and trailing spaces from the matched results
would be removed while determining which word_block they belong to.
Parameters
----------
pattern : str
A regex pattern.
audio_basename : str, optional
Search only within the given audio_basename.
            Default is `None`.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are a list whose elements are 2-tuples
whose first element is the starting second of the query and whose
values are the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
"""
def indexes_in_transcript_to_start_end_second(index_tup,
audio_basename):
"""
Calculates the word block index by having the beginning and ending
index of the matched result from the transcription
Parameters
----------
index_tup : (int, tup)
index_tup is of the form tuple(index_start, index_end)
audio_basename : str
            Returns
            -------
[float, float]
The time of the output of the matched result. Derived from two
separate word blocks belonging to the beginning and the end of
the index_start and index_end.
"""
space_indexes = [i for i, x in enumerate(
transcription[audio_basename]) if x == " "]
space_indexes.sort(reverse=True)
index_start, index_end = index_tup
# re.finditer returns the ending index by one more
index_end -= 1
while transcription[audio_basename][index_start] == " ":
index_start += 1
while transcription[audio_basename][index_end] == " ":
index_end -= 1
block_number_start = 0
block_number_end = len(space_indexes)
for block_cursor, space_index in enumerate(space_indexes):
if index_start > space_index:
block_number_start = (len(space_indexes) - block_cursor)
break
for block_cursor, space_index in enumerate(space_indexes):
if index_end > space_index:
block_number_end = (len(space_indexes) - block_cursor)
break
return (timestamps[audio_basename][block_number_start].start,
timestamps[audio_basename][block_number_end].end)
timestamps = self.get_timestamps()
if audio_basename is not None:
timestamps = {audio_basename: timestamps[audio_basename]}
transcription = {
audio_basename: ' '.join(
[word_block.word for word_block in timestamps[audio_basename]]
) for audio_basename in timestamps}
match_map = map(
lambda audio_basename: tuple((
audio_basename,
re.finditer(pattern, transcription[audio_basename]))),
transcription.keys())
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for audio_basename, match_iter in match_map:
for match in match_iter:
search_results[match.group()][audio_basename].append(
tuple(indexes_in_transcript_to_start_end_second(
match.span(), audio_basename)))
return search_results
|
python
|
{
"resource": ""
}
|
q10150
|
gf_poly_mul_simple
|
train
|
def gf_poly_mul_simple(p, q): # simple equivalent way of multiplying two polynomials without precomputation, but thus it's slower
'''Multiply two polynomials, inside Galois Field'''
# Pre-allocate the result array
r = bytearray(len(p) + len(q) - 1)
# Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficients of p with all coefficients of q)
for j in xrange(len(q)):
for i in xrange(len(p)):
r[i + j] ^= gf_mul(p[i], q[j]) # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) -- you can see it's your usual polynomial multiplication
return r
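
# Worked sketch of the same outer-product structure, specialised to GF(2) where
# gf_mul degenerates to `&` and coefficient addition stays XOR. This is only an
# illustration; the function above works over a larger Galois field via gf_mul.
def poly_mul_gf2(p, q):
    r = [0] * (len(p) + len(q) - 1)
    for j in range(len(q)):
        for i in range(len(p)):
            r[i + j] ^= p[i] & q[j]
    return r

# (x + 1) * (x^2 + x + 1) = x^3 + 1 over GF(2):
print(poly_mul_gf2([1, 1], [1, 1, 1]))   # [1, 0, 0, 1]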
|
python
|
{
"resource": ""
}
|
q10151
|
rs_correct_msg
|
train
|
def rs_correct_msg(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
'''Reed-Solomon main decoding function'''
global field_charac
if len(msg_in) > field_charac:
        # Note that it is in fact possible to encode/decode messages that are longer than field_charac, but because this will be above the field, this will generate more error positions during Chien Search than it should, because this will generate duplicate values, which should normally be prevented thanks to the prime polynomial reduction (e.g., because it can't discriminate between an error at position 1 or 256, both being exactly equal under galois field 2^8). So it's really not advised to do it, but it's possible (but then you're not guaranteed to be able to correct any error/erasure on symbols with a position above the length of field_charac -- if you really need a bigger message without chunking, then you should better enlarge c_exp so that you get a bigger field).
raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))
msg_out = bytearray(msg_in) # copy of message
# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
if erase_pos is None:
erase_pos = []
else:
for e_pos in erase_pos:
msg_out[e_pos] = 0
# check if there are too many erasures to correct (beyond the Singleton bound)
if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct")
# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
if max(synd) == 0:
return msg_out[:-nsym], msg_out[-nsym:] # no errors
# Find errors locations
if only_erasures:
err_pos = []
else:
# compute the Forney syndromes, which hide the erasures from the original syndrome (so that BM will just have to deal with errors, not erasures)
fsynd = rs_forney_syndromes(synd, erase_pos, len(msg_out), generator)
# compute the error locator polynomial using Berlekamp-Massey
err_loc = rs_find_error_locator(fsynd, nsym, erase_count=len(erase_pos))
# locate the message errors using Chien search (or bruteforce search)
err_pos = rs_find_errors(err_loc[::-1], len(msg_out), generator)
if err_pos is None:
raise ReedSolomonError("Could not locate error")
# Find errors values and apply them to correct the message
# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
msg_out = rs_correct_errata(msg_out, synd, (erase_pos + err_pos), fcr, generator) # note that we here use the original syndrome, not the forney syndrome (because we will correct both errors and erasures, so we need the full syndrome)
# check if the final message is fully repaired
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
if max(synd) > 0:
raise ReedSolomonError("Could not correct message")
# return the successfully decoded message
return msg_out[:-nsym], msg_out[-nsym:]
|
python
|
{
"resource": ""
}
|
q10152
|
rs_correct_msg_nofsynd
|
train
|
def rs_correct_msg_nofsynd(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
'''Reed-Solomon main decoding function, without using the modified Forney syndromes'''
global field_charac
if len(msg_in) > field_charac:
raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))
msg_out = bytearray(msg_in) # copy of message
# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
if erase_pos is None:
erase_pos = []
else:
for e_pos in erase_pos:
msg_out[e_pos] = 0
# check if there are too many erasures
if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct")
# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
if max(synd) == 0:
return msg_out[:-nsym], msg_out[-nsym:] # no errors
# prepare erasures locator and evaluator polynomials
erase_loc = None
#erase_eval = None
erase_count = 0
if erase_pos:
erase_count = len(erase_pos)
erase_pos_reversed = [len(msg_out)-1-eras for eras in erase_pos]
erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator)
#erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1)
# prepare errors/errata locator polynomial
if only_erasures:
err_loc = erase_loc[::-1]
#err_eval = erase_eval[::-1]
else:
err_loc = rs_find_error_locator(synd, nsym, erase_loc=erase_loc, erase_count=erase_count)
err_loc = err_loc[::-1]
#err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM)
# locate the message errors
err_pos = rs_find_errors(err_loc, len(msg_out), generator) # find the roots of the errata locator polynomial (ie: the positions of the errors/errata)
if err_pos is None:
raise ReedSolomonError("Could not locate error")
# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
msg_out = rs_correct_errata(msg_out, synd, err_pos, fcr=fcr, generator=generator)
# check if the final message is fully repaired
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
if max(synd) > 0:
raise ReedSolomonError("Could not correct message")
# return the successfully decoded message
return msg_out[:-nsym], msg_out[-nsym:]
|
python
|
{
"resource": ""
}
|
q10153
|
RSCodec.decode
|
train
|
def decode(self, data, erase_pos=None, only_erasures=False):
'''Repair a message, whatever its size is, by using chunking'''
    # erase_pos is a list of positions where you know (or at least strongly suspect) there is an erasure (i.e., a wrong character whose position you know). Just input the list of all positions you know there are errors, and this method will automatically split the erasure positions and attach them to the corresponding data chunk.
if isinstance(data, str):
data = bytearray(data, "latin-1")
dec = bytearray()
for i in xrange(0, len(data), self.nsize):
# Split the long message in a chunk
chunk = data[i:i+self.nsize]
# Extract the erasures for this chunk
e_pos = []
if erase_pos:
# First extract the erasures for this chunk (all erasures below the maximum chunk length)
e_pos = [x for x in erase_pos if x <= self.nsize]
            # Then remove the extracted erasures from the big list and also decrement all subsequent position values by nsize (the current chunk's size) so as to prepare the correct alignment for the next iteration
erase_pos = [x - (self.nsize+1) for x in erase_pos if x > self.nsize]
# Decode/repair this chunk!
dec.extend(rs_correct_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator, erase_pos=e_pos, only_erasures=only_erasures)[0])
return dec
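# Usage sketch: assumes the surrounding RSCodec class also provides the usual
# constructor and encode() method from the same module (not shown here).
rsc = RSCodec(10)                         # 10 ecc symbols per chunk
buf = rsc.encode(b"some important payload")
buf[5] ^= 0xFF                            # flip one byte inside the chunk
restored = rsc.decode(buf)                # this decode() strips the ecc symbols
assert restored == bytearray(b"some important payload")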
|
python
|
{
"resource": ""
}
|
q10154
|
find_loops
|
train
|
def find_loops( record, index, stop_types = STOP_TYPES, open=None, seen = None ):
"""Find all loops within the index and replace with loop records"""
if open is None:
open = []
if seen is None:
seen = set()
for child in children( record, index, stop_types = stop_types ):
if child['type'] in stop_types or child['type'] == LOOP_TYPE:
continue
if child['address'] in open:
# loop has been found
start = open.index( child['address'] )
new = frozenset( open[start:] )
if new not in seen:
seen.add(new)
yield new
elif child['address'] in seen:
continue
else:
seen.add( child['address'])
open.append( child['address'] )
for loop in find_loops( child, index, stop_types=stop_types, open=open, seen=seen ):
yield loop
open.pop( -1 )
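# Illustrative sketch with hand-built records (hypothetical data): each record
# is a dict with 'address', 'type' and 'refs', the shape children() expects,
# and 'dict'/'list' are assumed not to be in STOP_TYPES.
index = {
    1: {'address': 1, 'type': 'dict', 'refs': [2]},
    2: {'address': 2, 'type': 'list', 'refs': [3]},
    3: {'address': 3, 'type': 'dict', 'refs': [2]},   # 2 -> 3 -> 2 is a cycle
}
for loop in find_loops(index[1], index):
    print(sorted(loop))                                # expected: [2, 3]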
|
python
|
{
"resource": ""
}
|
q10155
|
promote_loops
|
train
|
def promote_loops( loops, index, shared ):
"""Turn loops into "objects" that can be processed normally"""
for loop in loops:
loop = list(loop)
members = [index[addr] for addr in loop]
external_parents = list(set([
addr for addr in sum([shared.get(addr,[]) for addr in loop],[])
if addr not in loop
]))
if external_parents:
if len(external_parents) == 1:
# potentially a loop that's been looped...
parent = index.get( external_parents[0] )
if parent['type'] == LOOP_TYPE:
continue
# we haven't already been looped...
loop_addr = new_address( index )
shared[loop_addr] = external_parents
loop_record = index[loop_addr] = {
'address': loop_addr,
'refs': loop,
'parents': external_parents,
'type': LOOP_TYPE,
'size': 0,
}
for member in members:
# member's references must *not* point to loop...
member['refs'] = [
ref for ref in member['refs']
if ref not in loop
]
# member's parents are *just* the loop
member['parents'][:] = [loop_addr]
            # each external parent now holds a single reference to the loop rather than many references to its members
for parent in external_parents:
parent = index[parent]
for member in members:
rewrite_references( parent['refs'], member['address'], None )
parent['refs'].append( loop_addr )
|
python
|
{
"resource": ""
}
|
q10156
|
children
|
train
|
def children( record, index, key='refs', stop_types=STOP_TYPES ):
"""Retrieve children records for given record"""
result = []
for ref in record.get( key,[]):
try:
record = index[ref]
        except KeyError as err:
#print 'No record for %s address %s in %s'%(key, ref, record['address'] )
pass # happens when an unreachable references a reachable that has been compressed out...
else:
if record['type'] not in stop_types:
result.append( record )
return result
|
python
|
{
"resource": ""
}
|
q10157
|
children_types
|
train
|
def children_types( record, index, key='refs', stop_types=STOP_TYPES ):
"""Produce dictionary mapping type-key to instances for all children"""
types = {}
for child in children( record, index, key, stop_types=stop_types ):
types.setdefault(child['type'],[]).append( child )
return types
|
python
|
{
"resource": ""
}
|
q10158
|
recurse_module
|
train
|
def recurse_module( overall_record, index, shared, stop_types=STOP_TYPES, already_seen=None, min_size=0 ):
"""Creates a has-a recursive-cost hierarchy
Mutates objects in-place to produce a hierarchy of memory usage based on
reference-holding cost assignment
"""
for record in recurse(
overall_record, index,
stop_types=stop_types,
already_seen=already_seen,
type_group=True,
):
# anything with a totsize we've already processed...
if record.get('totsize') is not None:
continue
rinfo = record
rinfo['module'] = overall_record.get('name',NON_MODULE_REFS )
if not record['refs']:
rinfo['rsize'] = 0
rinfo['children'] = []
else:
# TODO: provide a flag to coalesce based on e.g. type at each level or throughout...
rinfo['children'] = rinfo_children = list ( children( record, index, stop_types=stop_types ) )
rinfo['rsize'] = sum([
(
child.get('totsize',0.0)/float(len(shared.get( child['address'], [])) or 1)
)
for child in rinfo_children
], 0.0 )
rinfo['totsize'] = record['size'] + rinfo['rsize']
return None
|
python
|
{
"resource": ""
}
|
q10159
|
simple
|
train
|
def simple( child, shared, parent ):
"""Return sub-set of children who are "simple" in the sense of group_children"""
return (
not child.get('refs',())
and (
not shared.get(child['address'])
or
shared.get(child['address']) == [parent['address']]
)
)
|
python
|
{
"resource": ""
}
|
q10160
|
deparent_unreachable
|
train
|
def deparent_unreachable( reachable, shared ):
"""Eliminate all parent-links from unreachable objects from reachable objects
"""
for id,shares in shared.iteritems():
if id in reachable: # child is reachable
filtered = [
x
for x in shares
if x in reachable # only those parents which are reachable
]
if len(filtered) != len(shares):
shares[:] = filtered
|
python
|
{
"resource": ""
}
|
q10161
|
bind_parents
|
train
|
def bind_parents( index, shared ):
"""Set parents on all items in index"""
for v in iterindex( index ):
v['parents'] = shared.get( v['address'], [] )
|
python
|
{
"resource": ""
}
|
q10162
|
find_roots
|
train
|
def find_roots( disconnected, index, shared ):
"""Find appropriate "root" objects from which to recurse the hierarchies
Will generate a synthetic root for anything which doesn't have any parents...
"""
log.warn( '%s disconnected objects in %s total objects', len(disconnected), len(index))
natural_roots = [x for x in disconnected if x.get('refs') and not x.get('parents')]
log.warn( '%s objects with no parents at all' ,len(natural_roots))
for natural_root in natural_roots:
recurse_module(
natural_root, index, shared
)
yield natural_root
rest = [x for x in disconnected if x.get( 'totsize' ) is None]
un_found = {
'type': 'module',
'name': '<disconnected objects>',
'children': rest,
'parents': [ ],
'size': 0,
'totsize': sum([x['size'] for x in rest],0),
'address': new_address( index ),
}
index[un_found['address']] = un_found
yield un_found
|
python
|
{
"resource": ""
}
|
q10163
|
Loader.get_root
|
train
|
def get_root( self, key ):
"""Retrieve the given root by type-key"""
if key not in self.roots:
root,self.rows = load( self.filename, include_interpreter = self.include_interpreter )
self.roots[key] = root
return self.roots[key]
|
python
|
{
"resource": ""
}
|
q10164
|
Dinergate.url
|
train
|
def url(self):
"""The fetching target URL.
    The default behavior of this property is to build the URL string with the
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE`.
The subclasses could override
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE` or use a different
implementation.
"""
if not self.URL_TEMPLATE:
raise NotImplementedError
return self.URL_TEMPLATE.format(self=self)
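# Illustrative sketch (hypothetical subclass): the property simply runs
# str.format() with the instance bound to the name "self", so any instance
# attribute can appear in the template.
class ItemPage(Dinergate):
    URL_TEMPLATE = "http://www.example.com/item/{self.item_id}"

page = ItemPage.__new__(ItemPage)   # bypass __init__ purely for illustration
page.item_id = 42
print(page.url)                     # -> http://www.example.com/item/42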
|
python
|
{
"resource": ""
}
|
q10165
|
Site.record_action
|
train
|
def record_action(self, method_name, *args, **kwargs):
"""Record the method-calling action.
    The actions are expected to be played on a target object.
:param method_name: the name of called method.
:param args: the general arguments for calling method.
:param kwargs: the keyword arguments for calling method.
"""
self.actions.append((method_name, args, kwargs))
|
python
|
{
"resource": ""
}
|
q10166
|
Site.play_actions
|
train
|
def play_actions(self, target):
"""Play record actions on the target object.
    :param target: the target which receives all recorded actions, normally
        a brown ant app instance.
:type target: :class:`~brownant.app.Brownant`
"""
for method_name, args, kwargs in self.actions:
method = getattr(target, method_name)
method(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10167
|
Site.route
|
train
|
def route(self, host, rule, **options):
"""The decorator to register wrapped function as the brown ant app.
All optional parameters of this method are compatible with the
:meth:`~brownant.app.Brownant.add_url_rule`.
    Registered functions or classes must be importable by their qualified
    names. Unlike :class:`~flask.Flask`, registration works in a
    lazy-loading fashion: registered objects are only loaded before their
    first use.
The right way::
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
The wrong way::
def egg():
# the function could not be imported by its qualified name
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
egg()
:param host: the limited host name.
:param rule: the URL path rule as string.
:param options: the options to be forwarded to the
:class:`werkzeug.routing.Rule` object.
"""
def decorator(func):
endpoint = "{func.__module__}:{func.__name__}".format(func=func)
self.record_action("add_url_rule", host, rule, endpoint, **options)
return func
return decorator
|
python
|
{
"resource": ""
}
|
q10168
|
to_bytes_safe
|
train
|
def to_bytes_safe(text, encoding="utf-8"):
"""Convert the input value into bytes type.
    If the input value is a string, it will be encoded with the given
    encoding and the encoded bytes will be returned. If the input value is
    already bytes, it will be returned unchanged.
:param text: the input value which could be string or bytes.
:param encoding: the expected encoding be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes`
"""
if not isinstance(text, (bytes, text_type)):
raise TypeError("must be string type")
if isinstance(text, text_type):
return text.encode(encoding)
return text
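# Usage sketch: text input is encoded, bytes input passes through untouched.
assert to_bytes_safe(u"caf\u00e9") == u"caf\u00e9".encode("utf-8")
assert to_bytes_safe(b"already-bytes") == b"already-bytes"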
|
python
|
{
"resource": ""
}
|
q10169
|
Brownant.add_url_rule
|
train
|
def add_url_rule(self, host, rule_string, endpoint, **options):
"""Add a url rule to the app instance.
The url rule is the same with Flask apps and other Werkzeug apps.
:param host: the matched hostname. e.g. "www.python.org"
:param rule_string: the matched path pattern. e.g. "/news/<int:id>"
:param endpoint: the endpoint name as a dispatching key such as the
qualified name of the object.
"""
rule = Rule(rule_string, host=host, endpoint=endpoint, **options)
self.url_map.add(rule)
|
python
|
{
"resource": ""
}
|
q10170
|
Brownant.parse_url
|
train
|
def parse_url(self, url_string):
"""Parse the URL string with the url map of this app instance.
:param url_string: the origin URL string.
:returns: the tuple as `(url, url_adapter, query_args)`, the url is
parsed by the standard library `urlparse`, the url_adapter is
from the werkzeug bound URL map, the query_args is a
multidict from the werkzeug.
"""
url = urllib.parse.urlparse(url_string)
url = self.validate_url(url)
url_adapter = self.url_map.bind(server_name=url.hostname,
url_scheme=url.scheme,
path_info=url.path)
query_args = url_decode(url.query)
return url, url_adapter, query_args
|
python
|
{
"resource": ""
}
|
q10171
|
Brownant.dispatch_url
|
train
|
def dispatch_url(self, url_string):
"""Dispatch the URL string to the target endpoint function.
:param url_string: the origin URL string.
:returns: the return value of calling dispatched function.
"""
url, url_adapter, query_args = self.parse_url(url_string)
try:
endpoint, kwargs = url_adapter.match()
except NotFound:
raise NotSupported(url_string)
except RequestRedirect as e:
new_url = "{0.new_url}?{1}".format(e, url_encode(query_args))
return self.dispatch_url(new_url)
try:
handler = import_string(endpoint)
request = Request(url=url, args=query_args)
return handler(request, **kwargs)
except RequestRedirect as e:
return self.dispatch_url(e.new_url)
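# End-to-end sketch (hypothetical endpoint): "myhandlers:fetch_item" stands in
# for any dotted name that werkzeug's import_string() can resolve, e.g. a
# function fetch_item(request, item_id) defined in a myhandlers.py module.
app = Brownant()
app.add_url_rule("www.example.com", "/item/<int:item_id>",
                 "myhandlers:fetch_item")
result = app.dispatch_url("http://www.example.com/item/42?page=2")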
|
python
|
{
"resource": ""
}
|
q10172
|
Brownant.mount_site
|
train
|
def mount_site(self, site):
"""Mount a supported site to this app instance.
    :param site: the site instance to be mounted.
"""
if isinstance(site, string_types):
site = import_string(site)
site.play_actions(target=self)
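# Sketch of the record/replay flow (assumes the Site constructor accepts a
# plain name): route() only records an add_url_rule action; mount_site()
# replays it against the app instance.
site = Site("example.com")

@site.route("www.example.com", "/item/<int:item_id>")
def item(request, item_id):
    return {"id": item_id}

app = Brownant()
app.mount_site(site)    # replays the recorded add_url_rule call on the app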
|
python
|
{
"resource": ""
}
|
q10173
|
Github.githubWebHookConsumer
|
train
|
def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10174
|
Github.badge
|
train
|
def badge(self, *args, **kwargs):
"""
Latest Build Status Badge
Checks the status of the latest build of a given branch
and returns corresponding badge svg.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10175
|
Github.createComment
|
train
|
def createComment(self, *args, **kwargs):
"""
Post a comment on a given GitHub Issue or Pull Request
For a given Issue or Pull Request of a repository, this will write a new message.
This method takes input: ``v1/create-comment.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10176
|
lorem_gotham
|
train
|
def lorem_gotham():
"""Cheesy Gothic Poetry Generator
Uses Python generators to yield eternal angst.
When you need to generate random verbiage to test your code or
typographic design, let's face it... Lorem Ipsum and "the quick
brown fox" are old and boring!
What you need is something with *flavor*, the kind of thing a
depressed teenager with a lot of black makeup would write.
"""
w = lambda l: l[random.randrange(len(l))]
er = lambda w: w[:-1]+'ier' if w.endswith('y') else (w+'r' if w.endswith('e') else w+'er')
s = lambda w: w+'s'
punc = lambda c, *l: " ".join(l)+c
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
while True:
yield pick(
sentence('the',w(adj),w(them),'and the',w(them),w(them_verb)),
sentence('delivering me to',w(place)),
sentence('they',w(action),'my',w(me_part),'and',w(me_verb),'with all my',w(feeling)),
sentence('in the',w(place),'my',w(feeling),'shall',w(me_verb)),
sentence(punc(',', er(w(adj)),'than the a petty',w(feeling))),
sentence(er(w(adj)),'than',w(them),'in',w(place)),
sentence(punc('!','oh my',w(me_part)),punc('!','the',w(feeling))),
sentence('no one',s(w(angst)),'why the',w(them),w(them_verb + me_verb)))
|
python
|
{
"resource": ""
}
|
q10177
|
lorem_gotham_title
|
train
|
def lorem_gotham_title():
"""Names your poem
"""
w = lambda l: l[random.randrange(len(l))]
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
return pick(
sentence('why i',w(me_verb)),
sentence(w(place)),
sentence('a',w(adj),w(adj),w(place)),
sentence('the',w(them)))
|
python
|
{
"resource": ""
}
|
q10178
|
main
|
train
|
def main():
"""I provide a command-line interface for this module
"""
print()
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print(lorem_gotham_title().center(50))
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print()
poem = lorem_gotham()
for n in range(16):
if n in (4, 8, 12):
print()
print(next(poem))
print()
|
python
|
{
"resource": ""
}
|
q10179
|
EC2Manager.listWorkerTypes
|
train
|
async def listWorkerTypes(self, *args, **kwargs):
"""
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``v1/list-worker-types.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
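# Usage sketch (assumptions: the async taskcluster client exposes this
# generated EC2Manager wrapper under taskcluster.aio and accepts a rootUrl in
# its options dict; both details are assumptions rather than confirmed API).
import asyncio
import taskcluster.aio

async def show_worker_types():
    ec2 = taskcluster.aio.EC2Manager({'rootUrl': 'https://tc.example.com'})
    print(await ec2.listWorkerTypes())

asyncio.run(show_worker_types())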
|
python
|
{
"resource": ""
}
|
q10180
|
EC2Manager.runInstance
|
train
|
async def runInstance(self, *args, **kwargs):
"""
Run an instance
Request an instance of a worker type
This method takes input: ``v1/run-instance-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["runInstance"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10181
|
EC2Manager.workerTypeStats
|
train
|
async def workerTypeStats(self, *args, **kwargs):
"""
Look up the resource stats for a workerType
Return an object which has a generic state description. This only contains counts of instances
This method gives output: ``v1/worker-type-resources.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeStats"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10182
|
EC2Manager.workerTypeHealth
|
train
|
async def workerTypeHealth(self, *args, **kwargs):
"""
Look up the resource health for a workerType
Return a view of the health of a given worker type
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10183
|
EC2Manager.workerTypeErrors
|
train
|
async def workerTypeErrors(self, *args, **kwargs):
"""
Look up the most recent errors of a workerType
Return a list of the most recent errors encountered by a worker type
This method gives output: ``v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeErrors"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10184
|
EC2Manager.workerTypeState
|
train
|
async def workerTypeState(self, *args, **kwargs):
"""
Look up the resource state for a workerType
Return state information for a given worker type
This method gives output: ``v1/worker-type-state.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeState"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10185
|
EC2Manager.ensureKeyPair
|
train
|
async def ensureKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type exists
Idempotently ensure that a keypair of a given name exists
This method takes input: ``v1/create-key-pair.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ensureKeyPair"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10186
|
EC2Manager.removeKeyPair
|
train
|
async def removeKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type does not exist
Ensure that a keypair of a given name does not exist.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["removeKeyPair"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10187
|
EC2Manager.terminateInstance
|
train
|
async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10188
|
EC2Manager.getHealth
|
train
|
async def getHealth(self, *args, **kwargs):
"""
Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10189
|
EC2Manager.getRecentErrors
|
train
|
async def getRecentErrors(self, *args, **kwargs):
"""
Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10190
|
EC2Manager.regions
|
train
|
async def regions(self, *args, **kwargs):
"""
See the list of regions managed by this ec2-manager
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["regions"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10191
|
EC2Manager.amiUsage
|
train
|
async def amiUsage(self, *args, **kwargs):
"""
See the list of AMIs and their usage
List AMIs and their usage by returning a list of objects in the form:
{
region: string
volumetype: string
lastused: timestamp
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10192
|
EC2Manager.ebsUsage
|
train
|
async def ebsUsage(self, *args, **kwargs):
"""
See the current EBS volume usage list
Lists current EBS volume usage by returning a list of objects
that are uniquely defined by {region, volumetype, state} in the form:
{
region: string,
volumetype: string,
state: string,
totalcount: integer,
totalgb: integer,
touched: timestamp (last time that information was updated),
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ebsUsage"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10193
|
EC2Manager.dbpoolStats
|
train
|
async def dbpoolStats(self, *args, **kwargs):
"""
Statistics on the Database client pool
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["dbpoolStats"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10194
|
EC2Manager.sqsStats
|
train
|
async def sqsStats(self, *args, **kwargs):
"""
Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10195
|
EC2Manager.purgeQueues
|
train
|
async def purgeQueues(self, *args, **kwargs):
"""
Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10196
|
GithubEvents.pullRequest
|
train
|
def pullRequest(self, *args, **kwargs):
"""
GitHub Pull Request Event
When a GitHub pull request event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
    This exchange outputs: ``v1/github-pull-request-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
    * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
    * action: The GitHub `action` which triggered an event. For possible values, see the payload's actions property. (required)
"""
ref = {
'exchange': 'pull-request',
'name': 'pullRequest',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
{
'multipleWords': False,
'name': 'action',
},
],
'schema': 'v1/github-pull-request-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
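# Usage sketch (assumptions: the client is constructed with an options dict
# and the binding returned by _makeTopicExchange carries 'exchange' and
# 'routingKeyPattern' entries; both are assumptions, not confirmed here).
events = GithubEvents({'rootUrl': 'https://tc.example.com'})
binding = events.pullRequest(organization='my-org', repository='my-repo',
                             action='opened')
print(binding['exchange'])            # exchange to bind a pulse queue to
print(binding['routingKeyPattern'])   # e.g. "primary.my-org.my-repo.opened"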
|
python
|
{
"resource": ""
}
|
q10197
|
GithubEvents.push
|
train
|
def push(self, *args, **kwargs):
"""
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
    This exchange outputs: ``v1/github-push-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
    * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'push',
'name': 'push',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10198
|
GithubEvents.release
|
train
|
def release(self, *args, **kwargs):
"""
GitHub release Event
When a GitHub release event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
    This exchange outputs: ``v1/github-release-message.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
    * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'release',
'name': 'release',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-release-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10199
|
GithubEvents.taskGroupCreationRequested
|
train
|
def taskGroupCreationRequested(self, *args, **kwargs):
"""
tc-gh requested the Queue service to create all the tasks in a group
    This is supposed to signal that the taskCreate API has been called for every task in the task group
    for this particular repo and this particular organization;
    it is currently used for creating initial status indicators in the GitHub UI using the Statuses API.
    This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
    pass in the array of routes as a second argument to the publish method. Currently, we do
use the statuses routes to bind the handler that creates the initial status.
    This exchange outputs: ``v1/task-group-creation-requested.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
    * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'task-group-creation-requested',
'name': 'taskGroupCreationRequested',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/task-group-creation-requested.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
python
|
{
"resource": ""
}
|