_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def insert_from_segmentlistdict(self, seglists, name, version = None, comment = None, valid=None):
    """
    Insert the segments from the segmentlistdict object
    seglists as a new list of "active" segments into this
    LigolwSegments object.  The dictionary's keys are assumed
    to provide the instrument name for each segment list.  A
    new entry will be created in the segment_definer table for
    the segment lists, and the dictionary's keys, the name, and
    comment will be used to populate the entry's metadata.

    If valid is not None it is assumed to be a mapping from
    instrument name to that instrument's validity segments
    (TODO confirm against callers).
    """
    for instrument, segments in seglists.items():
        # when no validity information is supplied, record an
        # empty sequence of valid segments for this instrument
        if valid is None:
            curr_valid = ()
        else:
            curr_valid = valid[instrument]
        # one LigolwSegmentList per instrument, all sharing the
        # same name/version/comment metadata
        self.add(LigolwSegmentList(active = segments, instruments = set([instrument]), name = name, version = version, comment = comment, valid = curr_valid))
def get_by_name(self, name, clip_to_valid = False):
    """
    Retrieve the active segmentlists whose name equals name.
    The result is a segmentlistdict indexed by instrument.  All
    segmentlist objects within it will be copies of the
    contents of this object, modifications will not affect the
    contents of this object.  If clip_to_valid is True then the
    segmentlists will be intersected with their respective
    intervals of validity, otherwise they will be the verbatim
    active segments.

    Raises ValueError if more than one matching segmentlist is
    found for the same instrument.

    NOTE:  the intersection operation required by clip_to_valid
    will yield undefined results unless the active and valid
    segmentlist objects are coalesced.
    """
    result = segments.segmentlistdict()
    for seglist in self:
        if seglist.name != name:
            continue
        segs = seglist.active
        if clip_to_valid:
            # do not use in-place intersection:  that would
            # modify the stored active segments
            segs = segs & seglist.valid
        for instrument in seglist.instruments:
            if instrument in result:
                raise ValueError("multiple '%s' segmentlists for instrument '%s'" % (name, instrument))
            # copy so callers cannot mutate our internal state
            result[instrument] = segs.copy()
    return result
def finalize(self, process_row = None):
    """
    Restore the LigolwSegmentList objects to the XML tables in
    preparation for output.  All segments from all segment
    lists are inserted into the tables in time order, but this
    is NOT behaviour external applications should rely on.
    This is done simply in the belief that it might assist in
    constructing well balanced indexed databases from the
    resulting files.  If that proves not to be the case, or for
    some reason this behaviour proves inconvenient to preserve,
    then it might be discontinued without notice.  You've been
    warned.

    NOTE:  this empties the LigolwSegments object;  it cannot
    be called twice to produce the same output.
    """
    # determine the process_id to attach to all rows:  an
    # explicit process row wins, then the one stored on self
    if process_row is not None:
        process_id = process_row.process_id
    elif self.process is not None:
        process_id = self.process.process_id
    else:
        raise ValueError("must supply a process row to .__init__()")
    #
    # ensure ID generators are synchronized with table contents
    #
    self.segment_def_table.sync_next_id()
    self.segment_table.sync_next_id()
    self.segment_sum_table.sync_next_id()
    #
    # put all segment lists in time order
    #
    self.sort()
    #
    # generator function to convert segments into row objects,
    # each paired with the table to which the row is to be
    # appended
    #
    def row_generator(segs, target_table, process_id, segment_def_id):
        id_column = target_table.next_id.column_name
        for seg in segs:
            row = target_table.RowType()
            row.segment = seg
            row.process_id = process_id
            row.segment_def_id = segment_def_id
            setattr(row, id_column, target_table.get_next_id())
            # not all row types carry a comment column
            if 'comment' in target_table.validcolumns:
                row.comment = None
            yield row, target_table
    #
    # populate the segment_definer table from the list of
    # LigolwSegmentList objects and construct a matching list
    # of table row generators.  empty ourselves to prevent this
    # process from being repeated
    #
    row_generators = []
    while self:
        ligolw_segment_list = self.pop()
        segment_def_row = self.segment_def_table.RowType()
        segment_def_row.process_id = process_id
        segment_def_row.segment_def_id = self.segment_def_table.get_next_id()
        segment_def_row.instruments = ligolw_segment_list.instruments
        segment_def_row.name = ligolw_segment_list.name
        segment_def_row.version = ligolw_segment_list.version
        segment_def_row.comment = ligolw_segment_list.comment
        self.segment_def_table.append(segment_def_row)
        # valid segments go to segment_summary, active to segment
        row_generators.append(row_generator(ligolw_segment_list.valid, self.segment_sum_table, process_id, segment_def_row.segment_def_id))
        row_generators.append(row_generator(ligolw_segment_list.active, self.segment_table, process_id, segment_def_row.segment_def_id))
    #
    # populate segment and segment_summary tables by pulling
    # rows from the generators in time order
    #
    for row, target_table in iterutils.inorder(*row_generators):
        target_table.append(row)
def add_graph(self, rhs_graph):
    """
    Adds a graph to self.g

    :param rhs_graph: the graph to add
    :return: itself (allows chaining)
    """
    # rename vertices/edges in the incoming graph before merging so
    # they do not collide with names already present in self.g
    rhs_graph = self.__substitute_names_in_graph(rhs_graph)
    self.g = self.__merge_graphs(self.g, rhs_graph)
    return self
def set(self, code):
    """
    Executes the code and apply it to the self.g

    :param code: the LISP code to execute
    :return: True/False, depending on the result of the LISP code
    """
    if self.update:
        # refresh the variable substitution maps from the latest match
        self.vertices_substitution_dict, self.edges_substitution_dict, self.match_info\
            = self.match.get_variables_substitution_dictionaries(self.g, self.matching_graph)
    # best-effort application of the code to the matching graph;
    # only ordinary exceptions are swallowed so that SystemExit and
    # KeyboardInterrupt still propagate (the original bare "except:"
    # suppressed those too)
    try:
        self.matching_graph = self.__apply_code_to_graph(code, self.matching_graph)
    except Exception:
        pass
    # best-effort application of the (name-substituted) code to self.g
    try:
        code = self.__substitute_names_in_code(code)
        self.g = self.__apply_code_to_graph(code, self.g)
    except Exception:
        pass
    return True
def get_lock(lockfile):
    """
    Tries to write a lockfile containing the current pid. Excepts if
    the lockfile already contains the pid of a running process.
    Although this should prevent a lock from being granted twice, it
    can theoretically deny a lock unjustly in the unlikely event that
    the original process is gone but another unrelated process has
    been assigned the same pid by the OS.
    """
    # NOTE(review): Python 2 syntax (old-style raise, "except E,e",
    # print statement) -- this module will not run under Python 3.
    pidfile = open(lockfile, "a+")
    # here we do some meta-locking by getting an exclusive lock on the
    # pidfile before reading it, to prevent two daemons from seeing a
    # stale lock at the same time, and both trying to run
    try:
        fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
    except IOError,e:
        raise RuntimeError, "failed to lock %s: %s" % (lockfile, e)
    # we got the file lock, so check for a pid therein
    pidfile.seek(0)
    pidfile_pid = pidfile.readline().strip()
    if pidfile_pid.isdigit():
        if pycbc_glue.utils.pid_exists(int(pidfile_pid)):
            raise RuntimeError, ("pidfile %s contains pid (%s) of a running "
                "process" % (lockfile, pidfile_pid))
        else:
            print ("pidfile %s contains stale pid %s; writing new lock" %
                (lockfile, pidfile_pid))
    # the pidfile didn't exist or was stale, so grab a new lock
    pidfile.truncate(0)
    pidfile.write("%d\n" % os.getpid())
    pidfile.close()
    # should be entirely unecessary, but paranoia always served me well
    confirm_lock(lockfile)
    return True
def confirm_lock(lockfile):
    """
    Confirm that the given lockfile contains our pid.
    Should be entirely unecessary, but paranoia always served me well.

    Raises RuntimeError if the pid in the file is not ours.
    """
    # NOTE(review): Python 2 old-style raise syntax below.
    pidfile = open(lockfile, "r")
    pidfile_pid = pidfile.readline().strip()
    pidfile.close()
    if int(pidfile_pid) != os.getpid():
        # NOTE(review): the message arguments appear swapped -- it
        # prints our pid as "contains" and the file's pid as
        # "expected"; confirm intended wording before changing.
        raise RuntimeError, ("pidfile %s contains pid %s; expected pid %s!" %
            (lockfile, os.getpid(), pidfile_pid))
    return True
def _totuple( x ):
    """Utility stuff to convert string, int, long, float, None or anything to a usable tuple.

    Scalars become one-element tuples (numbers are stringified),
    None becomes (None,), and any other value is passed to tuple().
    NOTE(review): relies on Python 2 names basestring/long.
    """
    if isinstance( x, basestring ):
        out = x,
    elif isinstance( x, ( int, long, float ) ):
        # numbers are converted to their string form
        out = str( x ),
    elif x is None:
        out = None,
    else:
        # assume x is already iterable
        out = tuple( x )
    return out
def escape( text, newline=False ):
    """Escape special html characters.

    Replaces &, >, <, double and single quotes with HTML entities;
    when newline is True, also converts newlines to <br>.  Non-string
    input is returned unchanged.  NOTE(review): single quotes are
    mapped to &quot; (not &#39;), matching the original behaviour.
    """
    if isinstance( text, basestring ):
        # the "in" tests avoid calling replace() when nothing matches
        if '&' in text:
            text = text.replace( '&', '&amp;' )
        if '>' in text:
            text = text.replace( '>', '&gt;' )
        if '<' in text:
            text = text.replace( '<', '&lt;' )
        if '\"' in text:
            text = text.replace( '\"', '&quot;' )
        if '\'' in text:
            text = text.replace( '\'', '&quot;' )
        if newline:
            if '\n' in text:
                text = text.replace( '\n', '<br>' )
    return text
def render( self, tag, single, between, kwargs ):
    """Append the actual tags to content.

    Builds the markup for one element:  attributes come from kwargs
    (a value of None emits a bare attribute like <... checked>),
    'between' is the text placed between opening and closing tags,
    and 'single' selects the self-closing "<tag />" form.  The result
    is appended to self.parent.content, or returned when there is no
    parent.
    """
    out = "<%s" % tag
    for key, value in list( kwargs.items( ) ):
        if value is not None:                   # when value is None that means stuff like <... checked>
            key = key.strip('_')                # strip this so class_ will mean class, etc.
            if key == 'http_equiv':             # special cases, maybe change _ to - overall?
                key = 'http-equiv'
            elif key == 'accept_charset':
                key = 'accept-charset'
            out = "%s %s=\"%s\"" % ( out, key, escape( value ) )
        else:
            out = "%s %s" % ( out, key )
    if between is not None:
        out = "%s>%s</%s>" % ( out, between, tag )
    else:
        if single:
            out = "%s />" % out
        else:
            out = "%s>" % out
    if self.parent is not None:
        self.parent.content.append( out )
    else:
        return out
def close( self ):
    """Append a closing tag unless element has only opening tag.

    Raises ClosingError for one-tag elements and DeprecationError for
    deprecated tags in strict_html mode.
    """
    if self.tag in self.parent.twotags:
        self.parent.content.append( "</%s>" % self.tag )
    elif self.tag in self.parent.onetags:
        # single tags (e.g. <br>) cannot be closed
        raise ClosingError( self.tag )
    elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
        raise DeprecationError( self.tag )
def open( self, **kwargs ):
    """Append an opening tag.

    Keyword arguments become the tag's attributes (see render()).
    Raises DeprecationError for deprecated tags in strict_html mode.
    """
    if self.tag in self.parent.twotags or self.tag in self.parent.onetags:
        self.render( self.tag, False, None, kwargs )
    # NOTE(review): this checks self.mode while close() checks
    # self.parent.mode -- confirm which attribute is intended.
    elif self.mode == 'strict_html' and self.tag in self.parent.deptags:
        raise DeprecationError( self.tag )
def MultiIter(*sequences):
    """
    Generate all distinct N-tuples containing one element drawn from
    each of the N input sequences, with the elements of each tuple in
    the order of the input sequences and the left-most sequence
    iterated over first.

    Example:

    >>> x = MultiIter([0, 1, 2], [10, 11])
    >>> list(x)
    [(0, 10), (1, 10), (2, 10), (0, 11), (1, 11), (2, 11)]

    Each input sequence is consumed exactly once, so generators are
    safe to pass.  Placing the longest sequence first is fastest,
    because only the first sequence is materialised at each level of
    the recursion.
    """
    if not sequences:
        return
    if len(sequences) == 1:
        for item in sequences[0]:
            yield (item,)
        return
    # materialise the first sequence as 1-tuples once, then prepend
    # each of them to every tuple produced from the remaining
    # sequences.  the recursion order reproduces "left-most sequence
    # varies fastest".
    heads = [(item,) for item in sequences[0]]
    for tail in MultiIter(*sequences[1:]):
        for head in heads:
            yield head + tail
def choices(vals, n):
    """
    Generate every way of choosing n elements from the sequence vals,
    preserving the original order of the values within each result.

    Example:

    >>> list(choices(["a", "b", "c"], 2))
    [('a', 'b'), ('a', 'c'), ('b', 'c')]

    The output order is deterministic:  two calls on equal-length
    inputs pair up position-for-position.  Moreover, for an n-element
    input the sequence choices(input, n - m), reversed, yields in each
    position exactly the elements discarded by the corresponding
    combination in choices(input, m).

    Raises ValueError if n is negative.
    """
    if n < 0:
        raise ValueError(n)
    if n == 0:
        # exactly one way to choose nothing
        yield ()
    elif n == 1:
        for item in vals:
            yield (item,)
    elif n == len(vals):
        # exactly one way to choose everything
        yield tuple(vals)
    else:
        # fix each possible first element in turn, then recurse on
        # the remainder for the other n - 1 choices.  the slice stops
        # early enough that a full combination can still be formed.
        rest_count = n - 1
        for i, item in enumerate(vals[:-rest_count]):
            prefix = (item,)
            for suffix in choices(vals[i + 1:], rest_count):
                yield prefix + suffix
def nonuniq(iterable):
    """
    Yield the non-unique items of an iterable, preserving order.  If an
    item occurs N > 0 times in the input sequence, it will occur N-1
    times in the output sequence.

    Example:

    >>> x = nonuniq([0, 0, 2, 6, 2, 0, 5])
    >>> list(x)
    [0, 2, 0]
    """
    # every occurrence after the first is a repeat;  a set gives the
    # same membership semantics the original dict provided
    seen = set()
    for item in iterable:
        if item in seen:
            yield item
        else:
            seen.add(item)
def inorder(*iterables, **kwargs):
    """
    A generator that yields the values from several ordered iterables
    in order.

    Example:

    >>> x = [0, 1, 2, 3]
    >>> y = [1.5, 2.5, 3.5, 4.5]
    >>> z = [1.75, 2.25, 3.75, 4.25]
    >>> list(inorder(x, y, z))
    [0, 1, 1.5, 1.75, 2, 2.25, 2.5, 3, 3.5, 3.75, 4.25, 4.5]
    >>> list(inorder(x, y, z, key=lambda x: x * x))
    [0, 1, 1.5, 1.75, 2, 2.25, 2.5, 3, 3.5, 3.75, 4.25, 4.5]

    >>> x.sort(key=lambda x: abs(x-3))
    >>> y.sort(key=lambda x: abs(x-3))
    >>> z.sort(key=lambda x: abs(x-3))
    >>> list(inorder(x, y, z, key=lambda x: abs(x - 3)))
    [3, 2.5, 3.5, 2.25, 3.75, 2, 1.75, 4.25, 1.5, 4.5, 1, 0]

    >>> x = [3, 2, 1, 0]
    >>> y = [4.5, 3.5, 2.5, 1.5]
    >>> z = [4.25, 3.75, 2.25, 1.75]
    >>> list(inorder(x, y, z, reverse = True))
    [4.5, 4.25, 3.75, 3.5, 3, 2.5, 2.25, 2, 1.75, 1.5, 1, 0]
    >>> list(inorder(x, y, z, key = lambda x: -x))
    [4.5, 4.25, 3.75, 3.5, 3, 2.5, 2.25, 2, 1.75, 1.5, 1, 0]

    NOTE: this function will never reverse the order of elements in
    the input iterables.  If the reverse keyword argument is False (the
    default) then the input sequences must yield elements in increasing
    order, likewise if the keyword argument is True then the input
    sequences must yield elements in decreasing order.  Failure to
    adhere to this yields undefined results, and for performance
    reasons no check is performed to validate the element order in the
    input sequences.

    NOTE(review): Python 2 only -- relies on .next, .itervalues()
    and subscripting kwargs.keys().
    """
    reverse = kwargs.pop("reverse", False)
    keyfunc = kwargs.pop("key", lambda x: x) # default = identity
    if kwargs:
        raise TypeError("invalid keyword argument '%s'" % kwargs.keys()[0])
    # map each stream's bound .next method to its pending
    # (key, value, next) triple;  the .next method doubles as a
    # unique, hashable identifier for the stream
    nextvals = {}
    for iterable in iterables:
        next = iter(iterable).next
        try:
            nextval = next()
            nextvals[next] = keyfunc(nextval), nextval, next
        except StopIteration:
            pass
    if not nextvals:
        # all sequences are empty
        return
    if reverse:
        select = max
    else:
        select = min
    values = nextvals.itervalues
    if len(nextvals) > 1:
        while 1:
            # pick the stream whose pending value sorts first,
            # yield it, and advance that stream
            _, val, next = select(values())
            yield val
            try:
                nextval = next()
                nextvals[next] = keyfunc(nextval), nextval, next
            except StopIteration:
                del nextvals[next]
                if len(nextvals) < 2:
                    break
    # exactly one sequence remains, short circuit and drain it
    (_, val, next), = values()
    yield val
    while 1:
        yield next()
def randindex(lo, hi, n = 1.):
    """
    Yields integers in the range [lo, hi) where 0 <= lo < hi.  Each
    return value is a two-element tuple.  The first element is the
    random integer, the second is the natural logarithm of the
    probability with which that integer will be chosen.

    The CDF for the distribution from which the integers are drawn goes
    as [integer]^{n}, where n > 0.  Specifically, it's

            CDF(x) = (x^{n} - lo^{n}) / (hi^{n} - lo^{n})

    n = 1 yields a uniform distribution; n > 1 favours larger
    integers, n < 1 favours smaller integers.

    This is an infinite generator.
    """
    if not 0 <= lo < hi:
        raise ValueError("require 0 <= lo < hi: lo = %d, hi = %d" % (lo, hi))
    if n <= 0.:
        raise ValueError("n <= 0: %g" % n)
    elif n == 1.:
        # special case for uniform distribution:  constant
        # probability 1/(hi - lo) for every index
        try:
            lnP = math.log(1. / (hi - lo))
        except ValueError:
            raise ValueError("[lo, hi) domain error")
        hi -= 1
        rnd = random.randint
        while 1:
            yield rnd(lo, hi), lnP
    # CDF evaluated at index boundaries
    lnP = numpy.arange(lo, hi + 1, dtype = "double")**n
    lnP -= lnP[0]
    lnP /= lnP[-1]
    # differences give probabilities
    lnP = tuple(numpy.log(lnP[1:] - lnP[:-1]))
    if numpy.isinf(lnP).any():
        raise ValueError("[lo, hi) domain error")
    # invert the CDF:  draw u uniform in [0, 1) and map it through
    # CDF^{-1}(u) = (alpha * (u + beta))^{1/n} scaled to [lo, hi)
    beta = lo**n / (hi**n - lo**n)
    n = 1. / n
    alpha = hi / (1. + beta)**n
    flr = math.floor
    rnd = random.random
    while 1:
        index = int(flr(alpha * (rnd() + beta)**n))
        # the tuple look-up provides the second part of the
        # range safety check on index
        assert index >= lo
        yield index, lnP[index - lo]
def shift(self, x):
    """
    Return a new segment whose bounds are given by adding x to
    the segment's upper and lower bounds.
    """
    # build the shifted bounds first, then construct a new instance
    # of the same class directly via tuple.__new__ (segments are
    # immutable tuple subclasses)
    shifted = (self[0] + x, self[1] + x)
    return tuple.__new__(self.__class__, shifted)
def extent(self):
    """
    Return the segment whose end-points denote the maximum and
    minimum extent of the segmentlist.  Does not require the
    segmentlist to be coalesced.

    Raises ValueError if the list is empty.
    """
    if not len(self):
        raise ValueError("empty list")
    # track the extreme bounds;  the original implementation named
    # these "min" and "max", shadowing the built-ins
    lo_bound, hi_bound = self[0]
    for lo, hi in self:
        if lo < lo_bound:
            lo_bound = lo
        if hi > hi_bound:
            hi_bound = hi
    return segment(lo_bound, hi_bound)
def find(self, item):
    """
    Return the smallest i such that i is the index of an
    element that wholly contains item.  Raises ValueError if no
    such element exists.  Does not require the segmentlist to
    be coalesced.
    """
    # lazily scan for the first containing element
    matches = (index for index, candidate in enumerate(self) if item in candidate)
    try:
        return next(matches)
    except StopIteration:
        raise ValueError(item)
def map(self, func):
    """
    Return a dictionary of the results of func applied to each
    of the segmentlist objects in self.

    Example:

    >>> x = segmentlistdict()
    >>> x["H1"] = segmentlist([segment(0, 10)])
    >>> x["H2"] = segmentlist([segment(5, 15)])
    >>> x.map(lambda l: 12 in l)
    {'H2': True, 'H1': False}
    """
    # NOTE(review): .iteritems() is Python 2 only
    return dict((key, func(value)) for key, value in self.iteritems())
def keys_at(self, x):
    """
    Return a list of the keys for the segment lists that
    contain x.

    Example:

    >>> x = segmentlistdict()
    >>> x["H1"] = segmentlist([segment(0, 10)])
    >>> x["H2"] = segmentlist([segment(5, 15)])
    >>> x.keys_at(12)
    ['H2']
    """
    matching = []
    # containment test delegates to each segmentlist's __contains__
    for key, seglist in self.items():
        if x in seglist:
            matching.append(key)
    return matching
def intersects_segment(self, seg):
    """
    Returns True if any segmentlist in self intersects the
    segment, otherwise returns False.
    """
    # NOTE(review): .itervalues() is Python 2 only
    return any(value.intersects_segment(seg) for value in self.itervalues())
def intersects(self, other):
    """
    Returns True if there exists a segmentlist in self that
    intersects the corresponding segmentlist in other;  returns
    False otherwise.

    See also:

    .intersects_all(), .all_intersects(), .all_intersects_all()
    """
    # keys present only in other are simply skipped by the
    # "key in self" guard.  NOTE(review): .iteritems() is Python 2 only
    return any(key in self and self[key].intersects(value) for key, value in other.iteritems())
def intersects_all(self, other):
    """
    Returns True if each segmentlist in other intersects the
    corresponding segmentlist in self;  returns False
    if this is not the case, or if other is empty.

    See also:

    .intersects(), .all_intersects(), .all_intersects_all()
    """
    # the trailing "and bool(other)" makes an empty other return
    # False (all() of an empty iterable would otherwise be True).
    # NOTE(review): .iteritems() is Python 2 only
    return all(key in self and self[key].intersects(value) for key, value in other.iteritems()) and bool(other)
def all_intersects_all(self, other):
    """
    Returns True if self and other have the same keys, and each
    segmentlist intersects the corresponding segmentlist in the
    other;  returns False if this is not the case or if either
    dictionary is empty.

    See also:

    .intersects(), .all_intersects(), .intersects_all()
    """
    # key-set equality first, then pairwise intersection, and an
    # explicit emptiness check (all() of nothing would be True).
    # NOTE(review): .iteritems() is Python 2 only
    return set(self) == set(other) and all(other[key].intersects(value) for key, value in self.iteritems()) and bool(self)
def extend(self, other):
    """
    Appends the segmentlists from other to the corresponding
    segmentlists in self, adding new segmentslists to self as
    needed.
    """
    for key, value in other.iteritems():
        if key not in self:
            # shallow copy so we don't alias other's list object
            self[key] = _shallowcopy(value)
        else:
            self[key].extend(value)
def extract_common(self, keys):
    """
    Return a new segmentlistdict containing only those
    segmentlists associated with the keys in keys, with each
    set to their mutual intersection.  The offsets are
    preserved.
    """
    keys = set(keys)
    new = self.__class__()
    # one intersection computed once, then shallow-copied per key;
    # dict.__setitem__ is used to bypass any subclass __setitem__
    # side effects
    intersection = self.intersection(keys)
    for key in keys:
        dict.__setitem__(new, key, _shallowcopy(intersection))
        dict.__setitem__(new.offsets, key, self.offsets[key])
    return new
def intersection(self, keys):
    """
    Return the intersection of the segmentlists associated with
    the keys in keys.

    Returns an empty segmentlist when keys is empty.
    """
    keys = set(keys)
    if not keys:
        return segmentlist()
    # start from a shallow copy of one list and intersect in place
    # with the rest
    seglist = _shallowcopy(self[keys.pop()])
    for key in keys:
        seglist &= self[key]
    return seglist
def extract(connection, filename, table_names = None, verbose = False, xsl_file = None):
    """
    Convert the database at the given connection to a tabular LIGO
    Light-Weight XML document.  The XML document is written to the file
    named filename.  If table_names is not None, it should be a
    sequence of strings and only the tables in that sequence will be
    converted.  If verbose is True then progress messages will be
    printed to stderr.
    """
    xmldoc = ligolw.Document()
    xmldoc.appendChild(dbtables.get_xml(connection, table_names))
    # gzip output automatically when the target name ends in .gz;
    # a None/empty filename is treated as "stdout" for that test
    ligolw_utils.write_filename(xmldoc, filename, gz = (filename or "stdout").endswith(".gz"), verbose = verbose, xsl_file = xsl_file)
    # delete cursors
    xmldoc.unlink()
def append_search_summary(xmldoc, process, shared_object = "standalone", lalwrapper_cvs_tag = "", lal_cvs_tag = "", comment = None, ifos = None, inseg = None, outseg = None, nevents = 0, nnodes = 1):
    """
    Append search summary information associated with the given process
    to the search summary table in xmldoc.  Returns the newly-created
    search_summary table row.

    comment and ifos default to the corresponding attributes of the
    process row when not given.  A search_summary table is created in
    the document if one is not already present.
    """
    row = lsctables.SearchSummary()
    row.process_id = process.process_id
    row.shared_object = shared_object
    row.lalwrapper_cvs_tag = lalwrapper_cvs_tag
    row.lal_cvs_tag = lal_cvs_tag
    row.comment = comment or process.comment
    row.instruments = ifos if ifos is not None else process.instruments
    row.in_segment = inseg
    row.out_segment = outseg
    row.nevents = nevents
    row.nnodes = nnodes
    try:
        tbl = lsctables.SearchSummaryTable.get_table(xmldoc)
    except ValueError:
        # no search_summary table in the document yet;  add one
        tbl = xmldoc.childNodes[0].appendChild(lsctables.New(lsctables.SearchSummaryTable))
    tbl.append(row)
    return row
def common_options(func):
    """Commonly used command options.

    Decorator that attaches the shared --private and --preset click
    options to a command.  --preset is resolved through the PRESETS
    mapping into a (prime, generator) pair;  --private is decoded
    from base64 into hex.
    """
    def parse_preset(ctx, param, value):
        # unknown/missing preset falls back to (None, None)
        return PRESETS.get(value, (None, None))
    def parse_private(ctx, param, value):
        return hex_from_b64(value) if value else None
    func = click.option('--private', default=None, help='Private.', callback=parse_private)(func)
    func = click.option(
        '--preset',
        default=None, help='Preset ID defining prime and generator pair.',
        type=click.Choice(PRESETS.keys()), callback=parse_preset
    )(func)
    return func
def get_session_data( username, password_verifier, salt, client_public, private, preset):
    """Print out server session data.

    Builds an SRP server session from the given context and the
    client's public value, then echoes the session key, its proof and
    the proof hash (all base64) to the terminal.
    """
    session = SRPServerSession(
        SRPContext(username, prime=preset[0], generator=preset[1]),
        hex_from_b64(password_verifier), private=private)
    session.process(client_public, salt, base64=True)
    click.secho('Server session key: %s' % session.key_b64)
    click.secho('Server session key proof: %s' % session.key_proof_b64)
    click.secho('Server session key hash: %s' % session.key_proof_hash_b64)
def get_session_data(ctx, username, password, salt, server_public, private, preset):
    """Print out client session data.

    Builds an SRP client session from the given context and the
    server's public value, then echoes the session key, its proof and
    the proof hash (all base64) to the terminal.  ctx is unused here;
    presumably the click context -- TODO confirm.
    """
    session = SRPClientSession(
        SRPContext(username, password, prime=preset[0], generator=preset[1]),
        private=private)
    session.process(server_public, salt, base64=True)
    click.secho('Client session key: %s' % session.key_b64)
    click.secho('Client session key proof: %s' % session.key_proof_b64)
    click.secho('Client session key hash: %s' % session.key_proof_hash_b64)
def cwd_decorator(func):
    """
    decorator to change cwd to directory containing rst for this function

    The wrapped function runs with the working directory set to the
    directory of the first command line argument ending in ".rst" (if
    any);  the previous working directory is always restored, even if
    the function raises.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kw):
        cur_dir = os.getcwd()
        # locate an .rst file on the command line, if one was given
        found = False
        for arg in sys.argv:
            if arg.endswith(".rst"):
                found = arg
                break
        if found:
            directory = os.path.dirname(found)
            if directory:
                os.chdir(directory)
        try:
            return func(*args, **kw)
        finally:
            # restore the original cwd even when func raises (the
            # original implementation leaked the chdir on exceptions)
            os.chdir(cur_dir)
    return wrapper
def to_xml(node, pretty=False):
    """ convert an etree node to xml

    Serialises the node through an in-memory buffer;  when pretty is
    True the output is re-indented by pretty_xml() with the standard
    namespace attributes added.
    """
    fout = Sio()
    etree = et.ElementTree(node)
    etree.write(fout)
    xml = fout.getvalue()
    if pretty:
        xml = pretty_xml(xml, True)
    return xml
def pretty_xml(string_input, add_ns=False):
    """ pretty indent string_input

    When add_ns is True the input is wrapped in a temporary <foo>
    element carrying the DOC_CONTENT_ATTRIB namespace declarations so
    that prefixed names parse, and the wrapper is skipped again when
    serialising.
    """
    if add_ns:
        elem = "<foo "
        for key, value in DOC_CONTENT_ATTRIB.items():
            elem += ' %s="%s"' % (key, value)
        string_input = elem + ">" + string_input + "</foo>"
    doc = minidom.parseString(string_input)
    if add_ns:
        # skip over the synthetic <foo> wrapper when serialising
        s1 = doc.childNodes[0].childNodes[0].toprettyxml(" ")
    else:
        s1 = doc.toprettyxml(" ")
    return s1
def add_cell(preso, pos, width, height, padding=1, top_margin=4, left_margin=2):
    """ Add a text frame to current slide

    Lays out a width x height grid of cells on the slide (dimensions
    in cm, derived from SLIDE_WIDTH/SLIDE_HEIGHT) and places a text
    frame in the 1-based cell number pos, counting left-to-right then
    top-to-bottom.
    """
    # usable width after margins and inter-cell padding
    available_width = SLIDE_WIDTH
    available_width -= left_margin * 2
    available_width -= padding * (width - 1)
    column_width = available_width / width
    # usable height;  note only the top margin is subtracted
    avail_height = SLIDE_HEIGHT
    avail_height -= top_margin
    avail_height -= padding * (height - 1)
    column_height = avail_height / height
    # convert the 1-based cell index to (column, row) coordinates
    col_pos = int((pos - 1) % width)
    row_pos = int((pos - 1) / width)
    w = "{}cm".format(column_width)
    h = "{}cm".format(column_height)
    x = "{}cm".format(left_margin + (col_pos * column_width + (col_pos) * padding))
    y = "{}cm".format(top_margin + (row_pos * column_height + (row_pos) * padding))
    attr = {
        "presentation:class": "outline",
        "presentation:style-name": "Default-outline1",
        "svg:width": w,
        "svg:height": h,
        "svg:x": x,
        "svg:y": y,
    }
    preso.slides[-1].add_text_frame(attr)
    # remember the geometry for later consumers of the slide
    preso.slides[-1].grid_w_h_x_y = (w, h, x, y)
def add_otp_style(self, zip_odp, style_file):
    """
    takes the slide content and merges in the style_file

    Copies the template's Pictures/ entries into the output archive
    and writes its styles.xml (after passing it through
    override_styles) in place of the default.
    """
    style = zipwrap.Zippier(style_file)
    for picture_file in style.ls("Pictures"):
        zip_odp.write(picture_file, style.cat(picture_file, True))
    xml_data = style.cat("styles.xml", False)
    # import pdb;pdb.set_trace()
    xml_data = self.override_styles(xml_data)
    zip_odp.write("styles.xml", xml_data)
def update_frame_attributes(self, attrib):
    """ For positioning update the frame

    Applies the user supplied "align" hint (a string that may mention
    "top" and/or "right") as frame position attributes, then returns
    the attribute mapping.
    """
    align = self.user_defined.get("align", "")
    if "top" in align:
        attrib["style:vertical-pos"] = "top"
    if "right" in align:
        attrib["style:horizontal-pos"] = "right"
    return attrib
def update_style(self, mapping):
    """Use to update fill-color

    Merges mapping over a default solid-fill page style, registers the
    resulting style in the presentation's automatic styles and points
    this slide's draw:style-name at it.
    """
    default = {
        "presentation:background-visible": "true",
        "presentation:background-objects-visible": "true",
        "draw:fill": "solid",
        "draw:fill-color": "#772953",
        "draw:fill-image-width": "0cm",
        "draw:fill-image-height": "0cm",
        "presentation:display-footer": "true",
        "presentation:display-page-number": "false",
        "presentation:display-date-time": "true",
    }
    default.update(mapping)
    style = PageStyle(**default)
    node = style.style_node()
    # add style to automatic-style
    self.preso._auto_styles.append(node)
    # update page style-name
    # found in ._page
    self._page.set(ns("draw", "style-name"), node.attrib[ns("style", "name")])
def _copy(self):
    """ needs to update page numbers

    Returns a shallow copy of this slide with its page number bumped
    by one.
    """
    ins = copy.copy(self)
    ins._fire_page_number(self.page_number + 1)
    return ins
def get_node(self):
    """return etree Element representing this slide

    Appends any pending animation chunks and the notes frame to the
    page element (notes deliberately last) and stamps the footer name
    before returning the page node.
    """
    # already added title, text frames
    # add animation chunks
    if self.animations:
        anim_par = el("anim:par", attrib={"presentation:node-type": "timing-root"})
        self._page.append(anim_par)
        anim_seq = sub_el(
            anim_par, "anim:seq", attrib={"presentation:node-type": "main-sequence"}
        )
        for a in self.animations:
            a_node = a.get_node()
            anim_seq.append(a_node)
    # add notes now (so they are last)
    if self.notes_frame:
        notes = self.notes_frame.get_node()
        self._page.append(notes)
    if self.footer:
        self._page.attrib[ns("presentation", "use-footer-name")] = self.footer.name
    return self._page
def add_list(self, bl):
    """
    note that this pushes the cur_element, but doesn't pop it.
    You'll need to do that

    Attaches the outline/list object bl to the current text frame
    (creating one if needed), registers its default style with the
    presentation once, and makes bl the current element.
    """
    # text:list doesn't like being a child of text:p
    if self.cur_element is None:
        self.add_text_frame()
    self.push_element()
    self.cur_element._text_box.append(bl.node)
    # register each list style with the presentation only once
    style = bl.style_name
    if style not in self._preso._styles_added:
        self._preso._styles_added[style] = 1
        content = bl.default_styles_root()[0]
        self._preso._auto_styles.append(content)
    self.cur_element = bl
def add_table(self, t):
    """
    remember to call pop_element after done with table

    Pushes the current element, appends the table's node to the page,
    and makes the table the current element.
    """
    self.push_element()
    self._page.append(t.node)
    self.cur_element = t
def update_text(self, mapping):
    """Iterate over nodes, replace text with mapping

    Every node's text and tail are searched for each key of mapping
    and occurrences are replaced with the corresponding value.
    Raises KeyError if no replacement was made anywhere.
    """
    replaced = False
    for node in self._page.iter("*"):
        # skip nodes carrying no text at all
        if not (node.text or node.tail):
            continue
        for old, new in mapping.items():
            if node.text and old in node.text:
                node.text = node.text.replace(old, new)
                replaced = True
            if node.tail and old in node.tail:
                node.tail = node.tail.replace(old, new)
                replaced = True
    if not replaced:
        raise KeyError("Updating text failed with mapping:{}".format(mapping))
def parent_of(self, name):
    """
    go to parent of node with name, and set as cur_node.  Useful
    for creating new paragraphs

    Does nothing when no ancestor (or the node itself) has the given
    tag name.
    """
    if not self._in_tag(name):
        return
    # walk up to the node with the requested tag, then step to its
    # parent
    node = self.cur_node
    while node.tag != name:
        node = node.getparent()
    self.cur_node = node.getparent()
def _is_last_child(self, tagname, attributes=None):
    """
    Check if last child of cur_node is tagname with attributes

    Returns False when cur_node has no children.
    """
    children = self.cur_node.getchildren()
    if children:
        result = self._is_node(tagname, attributes, node=children[-1])
        return result
    return False
def _in_tag(self, tagname, attributes=None):
    """
    Determine whether cur_node (or any ancestor) is a *tagname* element.

    When *attributes* is a non-empty dict, the matching element's
    attributes must equal it exactly; otherwise the tag name alone wins.
    """
    ancestor = self.cur_node
    while ancestor is not None:
        if ancestor.tag == tagname:
            # an empty/None attributes argument means "any attributes"
            if not attributes:
                return True
            return ancestor.attrib == attributes
        ancestor = ancestor.getparent()
    return False
"resource": ""
} |
def _check_add_node(self, parent, name):
    """Return False when *name* may not be made a child of *parent*.

    The only forbidden combination is a text:a inside a draw:text-box.
    """
    forbidden = name == ns("text", "a") and parent.tag == ns("draw", "text-box")
    return not forbidden
"resource": ""
} |
def _add_styles(self, add_paragraph=True, add_text=True):
    """
    Add paragraph (text:p) and span (text:span) wrappers as required by
    the currently pending paragraph/text styles.
    """
    p_styles = self.get_para_styles()
    t_styles = self.get_span_styles()
    # fold any styles queued on the slide into the paragraph/text styles
    for s in self.slide.pending_styles:
        if isinstance(s, ParagraphStyle):
            p_styles.update(s.styles)
        elif isinstance(s, TextStyle):
            t_styles.update(s.styles)
    para = ParagraphStyle(**p_styles)
    if add_paragraph or self.slide.paragraph_attribs:
        p_attrib = {ns("text", "style-name"): para.name}
        p_attrib.update(self.slide.paragraph_attribs)
        # only open a new paragraph if we are not already inside an
        # identical one
        if not self._in_tag(ns("text", "p"), p_attrib):
            self.parent_of(ns("text", "p"))
            # the paragraph style must exist before the node referencing it
            self.slide._preso.add_style(para)
            self.add_node("text:p", attrib=p_attrib)
    # a span is only necessary if the text style changes
    if add_text and t_styles:
        text = TextStyle(**t_styles)
        children = self.cur_node.getchildren()
        if children:
            # if the last child already uses this text style, reuse it
            last = children[-1]
            if (
                last.tag == ns("text", "span")
                and last.attrib[ns("text", "style-name")] == text.name
                and last.tail is None
            ):  # if we have a tail, we can't reuse
                self.cur_node = children[-1]
                return
        if not self._is_node(
            ns("text", "span"), {ns("text", "style-name"): text.name}
        ):
            # register the text style, then open the span that uses it
            self.slide._preso.add_style(text)
            self.add_node("text:span", attrib={"text:style-name": text.name})
"resource": ""
} |
def line_break(self):
    """Insert as many line breaks as slide.insert_line_break requests,
    then reset the counter."""
    for i in range(self.slide.insert_line_break):
        # a text:line-break must live inside a text:p
        if not self._in_tag(ns("text", "p")):
            # no paragraph yet: open one (no explicit line-break needed
            # for the very first paragraph)
            self.add_node(ns("text", "p"))
        self.add_node(ns("text", "line-break"))
        self.pop_node()
        if self.cur_node.tag == ns("text", "p"):
            # NOTE(review): returns without resetting insert_line_break --
            # confirm leaving the counter set is intended here
            return
        # climb out of the span wrapper if we are still inside one
        if self.cur_node.getparent().tag != ns("text", "p"):
            self.pop_node()
    self.slide.insert_line_break = 0
"resource": ""
} |
def __tableStringParser(self, tableString):
    """
    Parse and validate *tableString* (header row, separator row, decision
    rows) into header and decision arrays.

    Args:
        tableString (str): Standard table string with header and decisions.

    Returns:
        [header, decisions] where header is a list of column names and
        decisions is a list of row-value lists. Returns None (implicitly)
        after reporting errors through view.Tli.showErrors, which is
        invoked on: empty table, duplicate header names, or rows with
        missing values.
    """
    error = []
    header = []
    decisions = []
    if tableString.split() == []:
        error.append('Table variable is empty!')
    else:
        tableString = tableString.split('\n')
        # drop blank lines, keep everything else
        newData = []
        for element in tableString:
            if element.strip():
                newData.append(element)
        # first non-blank line is the header; names must be unique
        for element in newData[0].split():
            if not element in header:
                header.append(element)
            else:
                error.append('Header element: ' + element + ' is not unique!')
        # newData[1] is the '====' separator; rows start at index 2 and
        # must have exactly one value per header column
        for i, tableString in enumerate(newData[2:]):
            split = tableString.split()
            if len(split) == len(header):
                decisions.append(split)
            else:
                error.append('Row: {}==> missing: {} data'.format(
                    str(i).ljust(4),
                    str(len(header) - len(split)).ljust(2))
                )
    if error:
        view.Tli.showErrors('TableStringError', error)
    else:
        return [header, decisions]
"resource": ""
} |
def __replaceSpecialValues(self, decisions):
    """
    Replace parent-symbol placeholders in *decisions* with the value from
    the row above.

    Args:
        decisions (array of array of str): Standard decision array format.

    Returns:
        The updated decision array, or None (implicitly) after reporting
        errors via view.Tli.showErrors when a placeholder has no parent
        value (first row, or parent itself is a placeholder).
    """
    error = []
    for row, line in enumerate(decisions):
        # NOTE(review): the trigger tests for a literal '.' while the
        # replacement compares against self.__parentSymbol -- confirm the
        # parent symbol is always '.' or these checks diverge
        if '.' in line:
            for i, element in enumerate(line):
                if row == 0:
                    # NOTE(review): appended once per element of the first
                    # row, not once per placeholder -- confirm intended
                    error.append(
                        "Row: {}colume: {}==> don't have parent value".format(str(row).ljust(4), str(i).ljust(4)))
                if element == self.__parentSymbol:
                    if decisions[row - 1][i] == '.':
                        error.append("Row: {}Colume: {}==> don't have parent value".format(str(row).ljust(4),
                                                                                          str(i).ljust(4)))
                    decisions[row][i] = decisions[row - 1][i]
    if error:
        view.Tli.showErrors('ReplaceSpecialValuesError', error)
    else:
        return decisions
"resource": ""
} |
def __toString(self, values):
    """
    Coerce every value in *values* to str, in place.

    Args:
        values (dict): Dictionary of values.

    Returns:
        The same dict with all values converted to strings.
    """
    for key in values:
        # The original test `not values[key] is str` compared the value
        # against the *type* str (true for all real data, so everything
        # was re-stringified); isinstance expresses the intent and skips
        # values that are already strings.
        if not isinstance(values[key], str):
            values[key] = str(values[key])
    return values
"resource": ""
} |
def __valueKeyWithHeaderIndex(self, values):
    """
    Map each header column index to the value requested for that column.

    Only header names present in *values* contribute an entry, so the
    result has the shape::

        {int(index of name in self.header): values[name], ...}
    """
    return {
        index: values[name]
        for index, name in enumerate(self.header)
        if name in values
    }
"resource": ""
} |
def __checkDecisionParameters(self, result, **values):
    """
    Validate the arguments of the public decision methods.

    Args:
        result (array of str): Requested header names.
        **values: headerName=value lookup pairs.

    Returns:
        A list of error message strings when anything is wrong, otherwise
        None (implicitly) -- callers treat a falsy return as success.
    """
    error = []
    if not result:
        error.append('Function parameter (result array) should contain one or more header string!')
    if not values:
        error.append('Function parameter (values variables) should contain one or more variable')
    # every requested column must exist in the header
    for header in result:
        if not header in self.header:
            error.append('String (' + header + ') in result is not in header!')
    # every lookup key must exist in the header and be non-blank
    for header in values:
        if not header in self.header:
            error.append('Variable (' + header + ') in values is not in header!')
        elif not values[header].split():
            error.append('Variable (' + header + ') in values is empty string')
    if error:
        return error
"resource": ""
} |
def __getDecision(self, result, multiple=False, **values):
    """
    Core decision finder.

    Args:
        result (array of str): Header names to return for matching rows.
        multiple (bool, optional): Collect all matching rows instead of
            returning on the first one.
        **values (dict): headerName=value pairs a row must match; table
            cells equal to the wildcard symbol match anything.

    Returns:
        {header: value} (single mode) or {header: [values...]} (multiple
        mode); when nothing matches, {header: None} so callers can
        distinguish "not found" from real string data.
    """
    values = self.__toString(values)
    # local name intentionally mirrors the helper: {column index: wanted value}
    __valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)
    errors = self.__checkDecisionParameters(result, **values)
    if errors:
        view.Tli.showErrors('ParametersError', errors)
    machingData = {}
    for line in self.decisions:
        match = True
        for index in __valueKeyWithHeaderIndex:
            if line[index] != __valueKeyWithHeaderIndex[index]:
                # a wildcard cell in the table matches any requested value
                if line[index] != self.__wildcardSymbol:
                    match = False
                    break
        if match:
            if multiple:
                for header in result:
                    if header not in machingData:
                        machingData[header] = [line[self.header.index(header)]]
                    else:
                        machingData[header].append(line[self.header.index(header)])
            else:
                # single mode: first matching row wins
                for header in result:
                    machingData[header] = line[self.header.index(header)]
                return machingData
    if multiple:
        if machingData:
            return machingData
    # Return none if not found (not string so
    # not found value can be recognized
    return dict((key, None) for key in result)
"resource": ""
} |
def decision(self, result, **values):
    """
    Find the first row matching *values* and return the requested columns.

    Args:
        result (array of str): Header names whose values should be returned.
        **values (dict): headerName=value pairs the matching row must have.

    Returns:
        A single string when *result* names one column, otherwise a list
        of strings in the order given by *result* (values are None when
        no row matches).

    Example:
        >>> table = DecisionTable('''
        >>> header1 header2
        >>> ===============
        >>> value1 value2
        >>> ''')
        >>>
        >>> header1, header2 = table.decision(
        >>>     ['header1','header2'],
        >>>     header1='value1',
        >>>     header2='value2'
        >>> )
        >>> print(header1,header2)
        (value1 value2)
    """
    data = self.__getDecision(result, **values)
    data = [data[value] for value in result]
    if len(data) == 1:
        return data[0]
    else:
        return data
"resource": ""
} |
def allDecisions(self, result, **values):
    """
    Like decision(), but collects every matching row.

    Returns:
        Arrays of found value strings per requested column, or a single
        array when *result* names only one column.
    """
    found = self.__getDecision(result, multiple=True, **values)
    columns = [found[name] for name in result]
    return columns[0] if len(columns) == 1 else columns
"resource": ""
} |
def _dict_to_map_str_str(self, d):
    """
    Thrift requires the params and headers dict values to only contain str values.

    Booleans become 'true'/'false'; every other value is str()'d.
    """
    # NOTE: Python 2 only -- tuple-unpacking lambda syntax and iteritems()
    # are not valid Python 3.
    return dict(map(
        lambda (k, v): (k, str(v).lower() if isinstance(v, bool) else str(v)),
        d.iteritems()
    ))
"resource": ""
} |
def suppress_stdout():
    """
    Context manager that suppresses stdout.

    Examples:
        >>> with suppress_stdout():
        ...     print('Test print')
        >>> print('test')
        test
    """
    save_stdout = sys.stdout
    sys.stdout = DevNull()
    # Restore stdout even when the managed block raises; the previous
    # version leaked the DevNull redirection on any exception.
    try:
        yield
    finally:
        sys.stdout = save_stdout
"resource": ""
} |
def clean_title(title):
    """
    Normalize a title: strip embedded dates and collapse whitespace.

    Args:
        title (str): Raw title.

    Returns:
        str: Title without dates and without duplicated, trailing or
        leading spaces.
    """
    date_pattern = re.compile(r'\W*'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'(?=\d*)(?:.{4}|.{2})'
                              r'\W*')
    without_dates = date_pattern.sub(' ', title)
    collapsed = re.sub(r'\s{2,}', ' ', without_dates)
    return collapsed.strip()
"resource": ""
} |
def get_ext(url):
    """
    Extract the filename extension from a url.

    Args:
        url (str): String representation of a url.

    Returns:
        str: Extension without the leading dot, '' when the path has none.
    """
    path = urllib.parse.urlparse(url).path
    ext = os.path.splitext(path)[1]
    return ext.lstrip('.')
"resource": ""
} |
def delete_duplicates(seq):
    """
    Remove duplicates from an iterable while preserving order.

    Args:
        seq: Iterable of hashable items.

    Returns:
        list: The unique items in first-seen order.
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
"resource": ""
} |
def joinOn(self, model, onIndex):
    """
    Perform an eqJoin with the given model on *onIndex*.

    The joined results are made accessible under the model's class name.
    """
    return self._joinOnAsPriv(model, onIndex, model.__name__)
"resource": ""
} |
def joinOnAs(self, model, onIndex, whatAs):
    """
    Like joinOn, but the joined results are accessible under the caller
    supplied name *whatAs* instead of the model's class name.
    """
    return self._joinOnAsPriv(model, onIndex, whatAs)
"resource": ""
} |
def _joinOnAsPriv(self, model, onIndex, whatAs):
    """
    Private helper implementing the join variants.

    Raises:
        Exception: when a join has already been registered -- only a
        single join per collection is supported.
    """
    if self._join:
        raise Exception("Already joined with a table!")
    # remember join target and the key the results will be nested under
    self._join = model
    self._joinedField = whatAs
    table = model.table
    self._query = self._query.eq_join(onIndex, r.table(table))
    return self
"resource": ""
} |
def orderBy(self, field, direct="desc"):
    """
    Order the results by *field*.

    *direct* selects the direction: "desc" (default) or "asc".
    Returns self for chaining.
    """
    ordering = r.desc(field) if direct == "desc" else r.asc(field)
    self._query = self._query.order_by(ordering)
    return self
"resource": ""
} |
def offset(self, value):
    """
    Skip the first *value* results of the query; useful for pagination.

    Returns self for chaining.
    """
    self._query = self._query.skip(value)
    return self
"resource": ""
} |
def limit(self, value):
    """
    Limit the query to at most *value* results; useful for pagination.

    Returns self for chaining.
    """
    self._query = self._query.limit(value)
    return self
"resource": ""
} |
def fetch(self):
    """
    Run the query and wrap each raw row in the model, nesting the joined
    model (if a join was registered) under the name chosen at join time.

    Returns:
        list: Model instances; the list is also cached on self._documents.
    """
    returnResults = []
    results = self._query.run()
    for result in results:
        if self._join:
            # Because we can tell the models to ignore certain fields,
            # through the protectedItems blacklist, we can nest models by
            # name and have each one act normal and not accidentally store
            # extra data from other models
            item = self._model.fromRawEntry(**result["left"])
            joined = self._join.fromRawEntry(**result["right"])
            item.protectedItems = self._joinedField
            item[self._joinedField] = joined
        else:
            item = self._model.fromRawEntry(**result)
        returnResults.append(item)
    self._documents = returnResults
    return self._documents
"resource": ""
} |
def coerce_put_post(request):
    """
    Django doesn't particularly understand REST.
    In case we send data over PUT, Django won't
    actually look at the data and load it. We need
    to twist its arm here.

    The try/except abomination here is due to a bug
    in mod_python. This should fix it.
    """
    if request.method == "PUT":
        # Bug fix: if _load_post_and_files has already been called, for
        # example by middleware accessing request.POST, the below code to
        # pretend the request is a POST instead of a PUT will be too late
        # to make a difference. Also calling _load_post_and_files will result
        # in the following exception:
        #   AttributeError: You cannot set the upload handlers after the upload has been processed.
        # The fix is to check for the presence of the _post field which is set
        # the first time _load_post_and_files is called (both by wsgi.py and
        # modpython.py). If it's set, the request has to be 'reset' to redo
        # the query value parsing in POST mode.
        if hasattr(request, '_post'):
            del request._post
            del request._files
        try:
            # temporarily masquerade as POST so Django parses the body
            request.method = "POST"
            request._load_post_and_files()
            request.method = "PUT"
        except AttributeError:
            # mod_python path: the method attribute is not writable, so
            # flip the underlying META value instead
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = 'PUT'
        request.PUT = request.POST
"resource": ""
} |
def loader_for_type(self, ctype):
    """
    Gets a function ref to deserialize content
    for a certain mimetype.

    Returns None (implicitly) when no registered mimetype prefix matches.
    """
    # NOTE: Python 2 (iteritems). First matching mimetype prefix wins.
    for loadee, mimes in Mimer.TYPES.iteritems():
        for mime in mimes:
            if ctype.startswith(mime):
                return loadee
"resource": ""
} |
def getsteps(levels, tagmax):
    """ Returns a list with the max number of posts per "tagcloud level"
    """
    # at least two weight levels, whatever the caller asked for
    ntw = max(levels, 2)
    slope = int(math.ceil(tagmax * 1.0 / ntw - 1))
    steps = [(level, 1 + level * slope) for level in range(ntw)]
    # just to be sure~ the last bucket always covers the biggest tag
    steps[-1] = (steps[-1][0], tagmax + 1)
    return steps
"resource": ""
} |
def build(site, tagdata):
    """ Returns the tag cloud for a list of tags.
    """
    tagdata.sort()
    # the most popular tag sets the weight scale
    tagmax = 0
    for _tagname, tagcount in tagdata:
        if tagcount > tagmax:
            tagmax = tagcount
    steps = getsteps(site.tagcloud_levels, tagmax)
    tags = []
    for tagname, tagcount in tagdata:
        # first bucket big enough for this tag determines its weight
        for level, upper in steps:
            if upper >= tagcount and upper > 0:
                weight = level + 1
                break
        tags.append({'tagname': tagname, 'count': tagcount, 'weight': weight})
    return tags
"resource": ""
} |
def getquery(query):
    'Performs a query and get the results.'
    try:
        conn = connection.cursor()
        try:
            conn.execute(query)
            data = conn.fetchall()
        finally:
            # always release the cursor, even when execute/fetchall fails
            conn.close()
    except Exception:
        # narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); keep the best-effort contract of returning []
        data = list()
    return data
"resource": ""
} |
def getcloud(site, feed_id=None):
    """ Returns the tag cloud for a site or a site's subscriber.

    The per-site cloud dict is cached; key 0 holds the whole-site cloud
    and each feed id maps to that subscriber's cloud.
    """
    cloudict = fjcache.cache_get(site.id, 'tagclouds')
    if not cloudict:
        cloudict = cloudata(site)
        fjcache.cache_set(site, 'tagclouds', cloudict)
    # A subscriber's tag cloud has been requested.
    if feed_id:
        feed_id = int(feed_id)
        if feed_id in cloudict:
            return cloudict[feed_id]
        # unknown subscriber: empty cloud
        return []
    # The site tagcloud has been requested.
    return cloudict[0]
"resource": ""
} |
def extract_stations(page):
    '''Extract bus stations from routine page.

    :param page: crawled page.
    '''
    names = [_(node.value) for node in page('.stateName')]
    first, last = names[0], names[-1]
    # map each terminal to the station sequence travelling towards it
    return {
        'terminal': {
            first: list(reversed(names)),
            last: names,
        },
        'stations': names,
    }
"resource": ""
} |
def extract_current_routine(page, stations):
    '''Extract current routine information from page.

    :param page: crawled page.
    :param stations: bus stations dict. See `~extract_stations`.
    :return: dict with destination/bus/waiting stations and distance, or
        None (implicitly) when no routine towards the terminal is found.
    '''
    current_routines = CURRENT_ROUTINE_PATTERN.findall(page.text())
    if not current_routines:
        return
    terminal_station = stations['stations'][-1]
    for routine in current_routines:
        # only report buses heading towards the terminal station
        if _(routine[0]) == terminal_station:
            distance = int(routine[1])
            stations_to_this_dir = stations['terminal'][terminal_station]
            waiting_station = _(page('.now .stateName').val())
            idx = stations_to_this_dir.index(waiting_station)
            # the bus is `distance` stops before the waiting station
            bus_station = stations_to_this_dir[idx - distance + 1]
            return {
                'destinate_station': terminal_station,
                'bus_station': bus_station,
                'waiting_station': waiting_station,
                'distance': distance
            }
"resource": ""
} |
def extract_bus_routine(page):
    '''Extract bus routine information from page.

    :param page: crawled page (pq instance or raw markup, which is
        wrapped in pq here).
    :return: dict with routine name, stations and current position.
    '''
    if not isinstance(page, pq):
        page = pq(page)
    stations = extract_stations(page)
    return {
        # Routine name.
        'name': extract_routine_name(page),
        # Bus stations.
        'stations': stations,
        # Current routine.
        'current': extract_current_routine(page, stations)
    }
"resource": ""
} |
def _grabData(self, key):
    """
    Look up the existing document by primary key; on a hit, populate
    self._data with it, mark the object as not-new, and return True.
    Return False when no such document exists.

    :param key: The primary key of the object we're looking for
    :type key: Str
    :rtype: Boolean
    """
    document = r.table(self.table).get(key).run(self._conn)
    if not document:
        return False
    self._data = document
    self._new = False
    return True
"resource": ""
} |
def save(self):
    """
    Insert or update this document.

    Objects tracked as new (the private _new flag) are inserted; existing
    ones are updated under their primary key, so caller-assigned ids on
    new objects still insert correctly.

    Returns:
        bool: True on success.

    Raises:
        Exception: when the reply reports errors.
    """
    if not self._new:
        # update: strip the primary key out of the payload first
        data = self._data.copy()
        ID = data.pop(self.primaryKey)
        reply = r.table(self.table).get(ID) \
            .update(data,
                    durability=self.durability,
                    non_atomic=self.non_atomic) \
            .run(self._conn)
    else:
        reply = r.table(self.table) \
            .insert(self._data,
                    durability=self.durability,
                    upsert=self.upsert) \
            .run(self._conn)
    self._new = False
    # adopt the server-generated primary key, if one was produced
    if "generated_keys" in reply and reply["generated_keys"]:
        self._data[self.primaryKey] = reply["generated_keys"][0]
    if "errors" in reply and reply["errors"] > 0:
        raise Exception("Could not insert entry: %s"
                        % reply["first_error"])
    return True
"resource": ""
} |
def delete(self):
    """
    Delete the current document from its table.

    Raises:
        Exception: when this is a new (never saved) object, since there
        is no stored primary key to delete by.

    Returns:
        bool: True once the delete has run.
    """
    if self._new:
        raise Exception("This is a new object, %s not in data, \
indicating this entry isn't stored." % self.primaryKey)
    r.table(self.table).get(self._data[self.primaryKey]) \
        .delete(durability=self.durability).run(self._conn)
    return True
"resource": ""
} |
def add_edge(self, node1_name, node2_name, edge_length=DEFAULT_EDGE_LENGTH):
    """ Adds a new edge to the current tree with specified characteristics

    Forbids addition of an edge, if a parent node is not present
    Forbids addition of an edge, if a child node already exists

    :param node1_name: name of the parent node, to which an edge shall be added
    :param node2_name: name of newly added child node
    :param edge_length: a length of specified edge
    :return: nothing, inplace changes
    :raises: ValueError (if parent node IS NOT present in the tree, or child node IS already present in the tree)
    """
    if not self.__has_node(name=node1_name):
        raise ValueError("Can not add an edge to a non-existing node {name}".format(name=node1_name))
    if self.__has_node(name=node2_name):
        raise ValueError("Can not add an edge to already existing node {name}".format(name=node2_name))
    # topology changed: cached multicolors must be recomputed lazily
    self.multicolors_are_up_to_date = False
    self.__get_node_by_name(name=node1_name).add_child(name=node2_name, dist=edge_length)
"resource": ""
} |
def __get_node_by_name(self, name):
    """ Returns the first TreeNode object whose name matches the argument

    :raises: ValueError (if no node with specified name is present in the tree)
    """
    # The previous implementation wrapped a `for` over `filter` in
    # try/except StopIteration; iterating a filter inside a for-loop never
    # raises StopIteration, so a missing node silently returned None
    # instead of raising ValueError as documented. Raise explicitly.
    for entry in self.nodes():
        if entry.name == name:
            return entry
    raise ValueError("Attempted to retrieve a non-existing tree node with name: {name}"
                     "".format(name=name))
"resource": ""
} |
def __has_edge(self, node1_name, node2_name, account_for_direction=True):
    """ Returns a boolean flag, telling if a tree has an edge with two nodes, specified by their names as arguments

    With account_for_direction=True the argument order must reflect the
    parent -> child relation; otherwise both directions are checked.
    """
    try:
        node1 = self.__get_node_by_name(name=node1_name)
        forward = node2_name in (child.name for child in node1.children)
        if account_for_direction:
            return forward
        node2 = self.__get_node_by_name(name=node2_name)
        backward = node1_name in (child.name for child in node2.children)
        return forward or backward
    except ValueError:
        # at least one of the named nodes does not exist
        return False
"resource": ""
} |
def __get_v_tree_consistent_leaf_based_hashable_multicolors(self):
    """ Internally used method, that recalculates VTree-consistent sets of leaves in the current tree """
    result = []
    # breadth-first traversal starting from the root
    nodes = deque([self.__root])
    while len(nodes) > 0:
        current_node = nodes.popleft()
        children = current_node.children
        nodes.extend(children)
        if not current_node.is_leaf():
            # internal node: multicolor built from every leaf beneath it
            leaves = filter(lambda node: node.is_leaf(), current_node.get_descendants())
            result.append(Multicolor(*[self.__leaf_wrapper(leaf.name) for leaf in leaves]))
        else:
            # a leaf contributes a singleton multicolor
            result.append(Multicolor(self.__leaf_wrapper(current_node.name)))
    # the empty multicolor is always considered tree-consistent
    result.append(Multicolor())
    return result
"resource": ""
} |
def reload(*command, ignore_patterns=None):
    """Run *command* as a subprocess and restart it whenever a watched
    file under the current directory changes.

    Args:
        command: argv of the command to (re)start.
        ignore_patterns: glob patterns of paths to ignore; when falsy,
            patterns are loaded from .reloadignore.
    """
    path = "."
    sig = signal.SIGTERM
    delay = 0.25
    ignorefile = ".reloadignore"
    # mutable-default fix: the default is now None instead of a shared []
    ignore_patterns = ignore_patterns or load_ignore_patterns(ignorefile)
    event_handler = ReloadEventHandler(ignore_patterns)
    # BUG FIX: `sig` was computed but the `signal` module itself was
    # passed to Reloader; pass the termination signal number instead.
    reloader = Reloader(command, sig)
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    reloader.start_command()
    try:
        while True:
            time.sleep(delay)
            sys.stdout.write(reloader.read())
            sys.stdout.flush()
            if event_handler.modified:
                reloader.restart_command()
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
        reloader.stop_command()
        # drain any remaining output from the stopped command
        sys.stdout.write(reloader.read())
        sys.stdout.flush()
"resource": ""
} |
def reload_me(*args, ignore_patterns=None):
    """Reload the currently running command with the given extra args.

    Args:
        args: extra argv appended after the interpreter and script path.
        ignore_patterns: forwarded to reload(); defaults to [] (avoiding
            the previous shared mutable default argument).
    """
    command = [sys.executable, sys.argv[0]]
    command.extend(args)
    reload(*command, ignore_patterns=ignore_patterns or [])
"resource": ""
} |
def parse_data_string(data_string):
    """ Parse a GRIMM gene order string into fragment type and signed blocks

    The earliest chromosome termination sign ("$" linear, "@" circular)
    ends the gene order; everything after it is ignored (there is no
    comment syntax inside data strings). The preceding text must be
    non-empty and is split on whitespace into oriented block names.

    :param data_string: a string to retrieve gene order information from
    :type data_string: ``str``
    :return: (``$`` | ``@``, [(``+`` | ``-``, block_name), ...])
    :rtype: ``tuple(str, list((str, str), ...))``
    :raises ValueError: when no termination sign exists, when nothing
        precedes it, or when a block consists of a bare sign character
    """
    data_string = data_string.strip()
    linear_index = data_string.find("$")
    circular_index = data_string.find("@")
    if linear_index < 0 and circular_index < 0:
        raise ValueError("Invalid data string. No chromosome termination sign ($|@) found.")
    if 0 in (linear_index, circular_index):
        raise ValueError("Invalid data string. No data found before chromosome was terminated.")
    # circular wins only when its sign appears and comes strictly first
    if linear_index < 0 or 0 < circular_index < linear_index:
        chr_type, terminator_index = "@", circular_index
    else:
        chr_type, terminator_index = "$", linear_index
    blocks = []
    for token in data_string[:terminator_index].split():
        # "+name" and "name" both denote positive orientation
        signed = token[0] in "+-"
        if signed and len(token) == 1:
            # a lone sign has no block name; "+" and "-" are forbidden names
            raise ValueError("Empty block name definition")
        sign = "-" if token[0] == "-" else "+"
        name = token[1:] if signed else token
        blocks.append((sign, name))
    return chr_type, blocks
"resource": ""
} |
def __assign_vertex_pair(block):
    """ Assigns usual BreakpointGraph type vertices to supplied block.

    Vertices are labeled "block_name" + "t" (tail) and "block_name" + "h"
    (head); the returned order respects the block's orientation.

    :param block: (``+`` | ``-``, block_name)
    :type block: ``(str, str)``
    :return: a pair of tagged vertices for the block
    :rtype: ``(str, str)``
    """
    sign, name = block
    # the block name may carry tag annotations after NAME_SEPARATOR
    data = name.split(BlockVertex.NAME_SEPARATOR)
    root_name, data = data[0], data[1:]
    tags = [entry.split(TaggedVertex.TAG_SEPARATOR) for entry in data]
    for tag_entry in tags:
        if len(tag_entry) == 1:
            # a tag without a value
            tag_entry.append(None)
        elif len(tag_entry) > 2:
            # the value itself contained the separator; re-join the remainder
            tag_entry[1:] = [TaggedVertex.TAG_SEPARATOR.join(tag_entry[1:])]
    tail, head = root_name + "t", root_name + "h"
    tail, head = TaggedBlockVertex(tail), TaggedBlockVertex(head)
    # head and tail reference each other as mates
    tail.mate_vertex = head
    head.mate_vertex = tail
    for tag, value in tags:
        head.add_tag(tag, value)
        tail.add_tag(tag, value)
    # positive orientation yields (tail, head); negative flips the pair
    return (tail, head) if sign == "+" else (head, tail)
"resource": ""
} |
def get_breakpoint_graph(stream, merge_edges=True):
    """ Taking a file-like object transforms supplied gene order data into the language of

    :param merge_edges: a flag that indicates if parallel edges in produced breakpoint graph shall be merged or not
    :type merge_edges: ``bool``
    :param stream: any iterable object where each iteration produces a ``str`` object
    :type stream: ``iterable`` ver ``str``
    :return: an instance of a BreakpointGraph that contains information about adjacencies in genome specified in GRIMM formatted input
    :rtype: :class:`bg.breakpoint_graph.BreakpointGraph`
    """
    result = BreakpointGraph()
    current_genome = None
    fragment_data = {}
    for line in stream:
        line = line.strip()
        if len(line) == 0:
            # empty lines are omitted
            continue
        if GRIMMReader.is_genome_declaration_string(data_string=line):
            # genome declaration: all gene order data until EOF or the
            # next declaration is attributed to this genome; any
            # fragment-level metadata is reset for the new genome
            current_genome = GRIMMReader.parse_genome_declaration_string(data_string=line)
            fragment_data = {}
        elif GRIMMReader.is_comment_string(data_string=line):
            if GRIMMReader.is_comment_data_string(string=line):
                # structured comments rooted at "fragment" accumulate
                # metadata attached to subsequent edges
                path, (key, value) = GRIMMReader.parse_comment_data_string(comment_data_string=line)
                if len(path) > 0 and path[0] == "fragment":
                    add_to_dict_with_path(destination_dict=fragment_data, key=key, value=value, path=path)
            else:
                continue
        elif current_genome is not None:
            # gene order data appearing before any genome declaration
            # cannot be attributed to a genome and is ignored
            parsed_data = GRIMMReader.parse_data_string(data_string=line)
            edges = GRIMMReader.get_edges_from_parsed_data(parsed_data=parsed_data)
            for v1, v2 in edges:
                # record the fragment's forward orientation on each edge
                edge_specific_data = {
                    "fragment": {
                        "forward_orientation": (v1, v2)
                    }
                }
                edge = BGEdge(vertex1=v1, vertex2=v2, multicolor=Multicolor(current_genome), data=deepcopy(fragment_data))
                edge.update_data(source=edge_specific_data)
                result.add_bgedge(bgedge=edge,
                                  merge=merge_edges)
    return result
"resource": ""
} |
def from_json(cls, data, json_schema_class=None):
    """ Deserialize a genome instance from its JSON representation.

    When an explicit schema class is supplied it is instantiated and used;
    otherwise the class-level ``json_schema`` instance is used.
    """
    if json_schema_class is not None:
        schema = json_schema_class()
    else:
        schema = cls.json_schema
    return schema.load(data)
def send_file(request, filename, content_type='image/jpeg'):
    """
    Send a file through Django without loading the whole file into
    memory at once. The FileWrapper will turn the file object into an
    iterator for chunks of 8KB.

    :param request: the incoming request (unused, kept for the view signature)
    :param filename: filesystem path of the file to stream back
    :param content_type: MIME type for the response
    :return: an HttpResponse streaming the file contents with Content-Length set
    """
    # open() instead of the Python-2-only file() builtin; the wrapper takes
    # ownership of the handle, which is released when the response is consumed.
    wrapper = FixedFileWrapper(open(filename, 'rb'))
    response = HttpResponse(wrapper, content_type=content_type)
    response['Content-Length'] = os.path.getsize(filename)
    return response
def send_zipfile(request, fileList):
    """
    Create a ZIP file on disk and transmit it in chunks of 8KB,
    without loading the whole file into memory. A similar approach can
    be used for large dynamic PDF files.

    :param request: the incoming request (unused, kept for the view signature)
    :param fileList: mapping of artist name -> iterable of (source_path, archive_name) pairs
    :return: an HttpResponse streaming the generated ZIP as an attachment
    """
    temp = tempfile.TemporaryFile()
    archive = zipfile.ZipFile(temp, 'w', zipfile.ZIP_DEFLATED)
    try:
        # .items() works on both Python 2 and 3 (iteritems() is Python-2-only)
        for artist, files in fileList.items():
            for source_path, archive_name in files:
                archive.write(source_path, '%s/%s' % (artist, archive_name))
    finally:
        # always close so the central directory is written and the handle released,
        # even if one of the writes above raises
        archive.close()
    wrapper = FixedFileWrapper(temp)
    response = HttpResponse(wrapper, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=FrogSources.zip'
    # tell() after writing yields the archive size; rewind before streaming
    response['Content-Length'] = temp.tell()
    temp.seek(0)
    return response
def enable():
    """
    Enable all benchmarking.
    """
    for benchmark_type in (Benchmark, ComparisonBenchmark,
                           BenchmarkedFunction, BenchmarkedClass):
        benchmark_type.enable = True
def disable():
    """
    Disable all benchmarking.
    """
    for benchmark_type in (Benchmark, ComparisonBenchmark,
                           BenchmarkedFunction, BenchmarkedClass):
        benchmark_type.enable = False
def add_tag(self, tag, value):
    """ Insert the (tag, value) pair into the sorted tag list, ignoring duplicates.

    Tags are kept in sorted order, so bisection is the fastest way to find the
    correct position for the new tag; the pair is inserted only when it is not
    already present, so no duplicates are ever added.
    """
    entry = (tag, value)
    position = bisect_left(self.tags, entry)
    if position == len(self.tags) or self.tags[position] != entry:
        self.tags.insert(position, entry)
def colors_json_ids(self):
    """ JSON ids of every color of this edge's multicolor, respecting multiplicity.

    When an edge is serialized to a JSON object, no explicit object for its
    multicolor is created; instead each color, repeated according to its
    multiplicity, is referenced by its ``json_id`` when available, falling
    back to the color's hash otherwise.
    """
    ids = []
    for color in self.multicolor.multicolors.elements():
        if hasattr(color, "json_id"):
            ids.append(color.json_id)
        else:
            ids.append(hash(color))
    return ids
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.