INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Freeze and shrink the graph based on a checkpoint and the output node names.
def freeze_from_checkpoint(input_checkpoint, output_file_path, output_node_names):
    """Freeze and shrink the graph based on a checkpoint and the output node names."""
    check_input_checkpoint(input_checkpoint)
    node_names = output_node_names_string_as_list(output_node_names)
    with tf.Session() as sess:
        restore_from_checkpoint(sess, input_checkpoint)
        freeze_graph.freeze_graph_with_def_protos(
            input_graph_def=sess.graph_def,
            input_saver_def=None,
            input_checkpoint=input_checkpoint,
            output_node_names=','.join(node_names),
            restore_op_name='save/restore_all',
            filename_tensor_name='save/Const:0',
            output_graph=output_file_path,
            clear_devices=True,
            initializer_nodes='')
Freeze and shrink the graph based on a session and the output node names.
def freeze(sess, output_file_path, output_node_names):
    """Freeze and shrink the graph based on a session and the output node names."""
    # Snapshot the session into a throwaway checkpoint, then reuse the
    # checkpoint-based freezing path.
    with TemporaryDirectory() as temp_dir:
        ckpt_path = os.path.join(temp_dir, 'model.ckpt')
        tf.train.Saver().save(sess, ckpt_path)
        freeze_from_checkpoint(ckpt_path, output_file_path, output_node_names)
Save a small version of the graph based on a session and the output node names.
def save_graph_only(sess, output_file_path, output_node_names, as_text=False):
    """Save a small version of the graph based on a session and the output node names."""
    # Strip device placements so the exported graph is portable.
    for node in sess.graph_def.node:
        node.device = ''
    sub_graph = graph_util.extract_sub_graph(sess.graph_def, output_node_names)
    out_dir, out_name = os.path.split(output_file_path)
    graph_io.write_graph(sub_graph, out_dir, out_name, as_text=as_text)
Save a small version of the graph based on a checkpoint and the output node names.
def save_graph_only_from_checkpoint(input_checkpoint, output_file_path, output_node_names, as_text=False):
    """Save a small version of the graph based on a checkpoint and the output node names."""
    check_input_checkpoint(input_checkpoint)
    node_names = output_node_names_string_as_list(output_node_names)
    with tf.Session() as sess:
        restore_from_checkpoint(sess, input_checkpoint)
        save_graph_only(sess, output_file_path, node_names, as_text=as_text)
Save the weights of the trainable variables each one in a different file in output_path.
def save_weights(sess, output_path, conv_var_names=None, conv_transpose_var_names=None):
    """Save the weights of the trainable variables, each one in a different file in output_path.

    :param sess: TensorFlow session holding the variable values.
    :param output_path: Prefix for the per-variable output files.
    :param conv_var_names: Variable names whose axes are permuted with [3, 0, 1, 2].
    :param conv_transpose_var_names: Variable names whose axes are permuted with [3, 1, 0, 2].
    """
    if not conv_var_names:
        conv_var_names = []
    if not conv_transpose_var_names:
        conv_transpose_var_names = []
    for var in tf.trainable_variables():
        filename = '{}-{}'.format(output_path, var.name.replace(':', '-').replace('/', '-'))
        # Move axis 3 to the front for the listed conv kernels
        # (presumably output channels first — TODO confirm expected layout).
        if var.name in conv_var_names:
            var = tf.transpose(var, perm=[3, 0, 1, 2])
        elif var.name in conv_transpose_var_names:
            var = tf.transpose(var, perm=[3, 1, 0, 2])
        value = sess.run(var)
        # BUG FIX: ndarray.tofile() writes raw binary data; the file must be
        # opened in binary mode ('wb'), not text mode ('w').
        # noinspection PyTypeChecker
        with open(filename, 'wb') as file_:
            value.tofile(file_)
Save the weights of the trainable variables given a checkpoint each one in a different file in output_path.
def save_weights_from_checkpoint(input_checkpoint, output_path, conv_var_names=None, conv_transpose_var_names=None):
    """Save the weights of the trainable variables given a checkpoint, each one in a different file in output_path."""
    check_input_checkpoint(input_checkpoint)
    with tf.Session() as sess:
        restore_from_checkpoint(sess, input_checkpoint)
        save_weights(sess, output_path,
                     conv_var_names=conv_var_names,
                     conv_transpose_var_names=conv_transpose_var_names)
Return a TensorFlow saver from a checkpoint containing the metagraph.
def restore_from_checkpoint(sess, input_checkpoint):
    """Return a TensorFlow saver from a checkpoint containing the metagraph.

    Imports the metagraph stored next to the checkpoint and restores the
    variable values into ``sess``.
    """
    saver = tf.train.import_meta_graph('{}.meta'.format(input_checkpoint))
    saver.restore(sess, input_checkpoint)
    return saver
Parse the tag, instantiate the class.
def parse(cls, parser, token):
    """
    Parse the tag, instantiate the class.

    :type parser: django.template.base.Parser
    :type token: django.template.base.Token
    """
    tag_name, args, kwargs = parse_token_kwargs(
        parser, token,
        allowed_kwargs=cls.allowed_kwargs,
        compile_args=cls.compile_args,
        compile_kwargs=cls.compile_kwargs,
    )
    cls.validate_args(tag_name, *args, **kwargs)

    # Block tags consume everything up to their end tag as a nodelist.
    if cls.end_tag_name:
        kwargs['nodelist'] = parser.parse((cls.end_tag_name,))
        parser.delete_first_token()

    return cls(tag_name, *args, **kwargs)
The default Django render() method for the tag.
def render(self, context):
    """
    The default Django render() method for the tag.

    This method resolves the filter expressions, and calls :func:`render_tag`.
    """
    # Resolve token args/kwargs against the context, unless they were
    # stored uncompiled.
    if self.compile_args:
        tag_args = [expr.resolve(context) for expr in self.args]
    else:
        tag_args = self.args
    if self.compile_kwargs:
        tag_kwargs = dict((name, expr.resolve(context)) for name, expr in six.iteritems(self.kwargs))
    else:
        tag_kwargs = self.kwargs
    return self.render_tag(context, *tag_args, **tag_kwargs)
Render the tag with all arguments resolved to their actual values.
def render_tag(self, context, *tag_args, **tag_kwargs):
    """
    Render the tag, with all arguments resolved to their actual values.

    Subclasses must override this; the base implementation always raises.
    """
    raise NotImplementedError("{0}.render_tag() is not implemented!".format(self.__class__.__name__))
Validate the syntax of the template tag.
def validate_args(cls, tag_name, *args, **kwargs):
    """
    Validate the syntax of the template tag.

    :raises TemplateSyntaxError: when the number of positional arguments
        falls outside the ``[min_args, max_args]`` range configured on the class.
    """
    if cls.min_args is not None and len(args) < cls.min_args:
        if cls.min_args == 1:
            raise TemplateSyntaxError("'{0}' tag requires at least {1} argument".format(tag_name, cls.min_args))
        else:
            raise TemplateSyntaxError("'{0}' tag requires at least {1} arguments".format(tag_name, cls.min_args))

    if cls.max_args is not None and len(args) > cls.max_args:
        if cls.max_args == 0:
            if cls.allowed_kwargs:
                # BUG FIX: message typo "keywords arguments" -> "keyword arguments"
                raise TemplateSyntaxError("'{0}' tag only allows keyword arguments, for example {1}=\"...\".".format(tag_name, cls.allowed_kwargs[0]))
            else:
                raise TemplateSyntaxError("'{0}' tag doesn't support any arguments".format(tag_name))
        elif cls.max_args == 1:
            raise TemplateSyntaxError("'{0}' tag only allows {1} argument.".format(tag_name, cls.max_args))
        else:
            raise TemplateSyntaxError("'{0}' tag only allows {1} arguments.".format(tag_name, cls.max_args))
Return the context data for the included template.
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
    """
    Return the context data for the included template.

    Subclasses must override this; the base implementation always raises.
    """
    raise NotImplementedError("{0}.get_context_data() is not implemented.".format(self.__class__.__name__))
Wrap the context data in a :class:`~django.template.Context` object.
def get_context(self, parent_context, data):
    """
    Wrap the context data in a :class:`~django.template.Context` object.

    :param parent_context: The context of the parent template.
    :type parent_context: :class:`~django.template.Context`
    :param data: The result from :func:`get_context_data`
    :type data: dict
    :return: Context data.
    :rtype: :class:`~django.template.Context`
    """
    if django.VERSION >= (1, 8):
        new_context = parent_context.new(data)
    else:
        # Django < 1.8: copy the render settings from the parent by hand.
        settings = {
            'autoescape': parent_context.autoescape,
            'current_app': parent_context.current_app,
            'use_l10n': parent_context.use_l10n,
            'use_tz': parent_context.use_tz,
        }
        new_context = Context(data, **settings)

    # Pass CSRF token for same reasons as @register.inclusion_tag does.
    csrf_token = parent_context.get('csrf_token', None)
    if csrf_token is not None:
        new_context['csrf_token'] = csrf_token

    return new_context
Rendering of the tag. It either assigns the value as variable or renders it.
def render_tag(self, context, *tag_args, **tag_kwargs):
    """
    Rendering of the tag. It either assigns the value as variable, or renders it.
    """
    if self.as_var:
        # Assign the value in the parent context
        context[self.as_var] = self.get_value(context, *tag_args, **tag_kwargs)
    # NOTE(review): the collapsed original is ambiguous about whether this
    # return sits inside the `if`; an always-empty render matches the
    # assignment-node semantics — confirm against upstream.
    return u''
Parse the as var syntax.
def parse(cls, parser, token):
    """
    Parse the "as var" syntax.
    """
    bits, as_var = parse_as_var(parser, token)
    tag_name, args, kwargs = parse_token_kwargs(
        parser, bits,
        ('template',) + cls.allowed_kwargs,
        compile_args=cls.compile_args,
        compile_kwargs=cls.compile_kwargs,
    )

    # Pass through standard chain
    cls.validate_args(tag_name, *args)
    return cls(tag_name, as_var, *args, **kwargs)
Rendering of the tag. It either assigns the value as variable or renders it.
def render_tag(self, context, *tag_args, **tag_kwargs):
    """
    Rendering of the tag. It either assigns the value as variable, or renders it.
    """
    # Be very explicit about which base implementation is used:
    # using super() for mixin support would not resolve nicely here.
    if self.as_var:
        # Assign the value in the parent context
        return BaseAssignmentNode.render_tag(self, context, *tag_args, **tag_kwargs)
    # Render the output using the BaseInclusionNode features
    return BaseInclusionNode.render_tag(self, context, *tag_args, **tag_kwargs)
Return the context data for the inclusion tag.
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
    """
    Return the context data for the inclusion tag.

    Returns ``{'value': self.get_value(parent_context, *tag_args, **tag_kwargs)}``
    by default.
    """
    if 'template' not in self.allowed_kwargs:
        # get_value() implementations don't need to know about the internally
        # inserted 'template' parameter; drop it so **tag_kwargs can be
        # forwarded verbatim.
        tag_kwargs.pop('template', None)
    return {
        self.context_value_name: self.get_value(parent_context, *tag_args, **tag_kwargs)
    }
Create a TensorFlow Session from a Caffe model.
def caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name='Graph',
                                conversion_out_dir_path=None, use_padding_same=False):
    """Create a TensorFlow Session from a Caffe model."""
    try:
        # noinspection PyUnresolvedReferences
        from caffeflow import convert
    except ImportError:
        raise Exception("caffeflow package needs to be installed to freeze Caffe models. Check out the README file.")

    # Convert into a user-supplied directory when given, a temp dir otherwise.
    with (dummy_context_mgr(conversion_out_dir_path) or util.TemporaryDirectory()) as dir_path:
        params_path = os.path.join(dir_path, 'params_values.npy')
        network_path = os.path.join(dir_path, 'network.py')
        convert.convert(caffe_def_path, caffemodel_path, params_path, network_path, False,
                        use_padding_same=use_padding_same)
        network_module = imp.load_source('module.name', network_path)
        network = getattr(network_module, graph_name)(inputs)
        sess = tf.Session()
        network.load(params_path, sess)
        return sess
Freeze and shrink the graph based on a Caffe model the input tensors and the output node names.
def freeze(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names,
           graph_name='Graph', conversion_out_dir_path=None, checkpoint_out_path=None,
           use_padding_same=False):
    """Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names."""
    with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs,
                                     graph_name=graph_name,
                                     conversion_out_dir_path=conversion_out_dir_path,
                                     use_padding_same=use_padding_same) as sess:
        saver = tf.train.Saver()
        with (dummy_context_mgr(checkpoint_out_path) or util.TemporaryDirectory()) as temp_dir_path:
            ckpt_path = checkpoint_out_path or os.path.join(temp_dir_path, 'pose.ckpt')
            saver.save(sess, ckpt_path)
            node_names = util.output_node_names_string_as_list(output_node_names)
            tf_freeze.freeze_from_checkpoint(ckpt_path, output_file_path, node_names)
Save a small version of the graph based on a Caffe model the input tensors and the output node names.
def save_graph_only(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names,
                    graph_name='Graph', use_padding_same=False):
    """Save a small version of the graph based on a Caffe model, the input tensors and the output node names."""
    with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs,
                                     graph_name=graph_name,
                                     use_padding_same=use_padding_same) as sess:
        tf_freeze.save_graph_only(sess, output_file_path, output_node_names)
Save the weights of the trainable variables each one in a different file in output_path.
def save_weights(caffe_def_path, caffemodel_path, inputs, output_path, graph_name='Graph',
                 conv_var_names=None, conv_transpose_var_names=None, use_padding_same=False):
    """Save the weights of the trainable variables, each one in a different file in output_path."""
    with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs,
                                     graph_name=graph_name,
                                     use_padding_same=use_padding_same) as sess:
        tf_freeze.save_weights(sess, output_path,
                               conv_var_names=conv_var_names,
                               conv_transpose_var_names=conv_transpose_var_names)
Make a sequence into rows of num_columns columns.
def make_rows(num_columns, seq):
    """
    Make a sequence into rows of num_columns columns.

    >>> tuple(make_rows(2, [1, 2, 3, 4, 5]))
    ((1, 4), (2, 5), (3, None))
    >>> tuple(make_rows(3, [1, 2, 3, 4, 5]))
    ((1, 3, 5), (2, 4, None))
    """
    # Minimum number of rows necessary to fit the list in num_columns columns.
    num_rows, remainder = divmod(len(seq), num_columns)
    if remainder:
        num_rows += 1
    # Break the seq into num_columns groups of num_rows items each.
    try:
        columns = more_itertools.grouper(seq, num_rows)
    except TypeError:
        # more_itertools before 6.x took the arguments in (n, iterable) order.
        columns = more_itertools.grouper(num_rows, seq)
    # columns is a sequence of columns; transpose it to produce rows.
    return zip(*columns)
Split a sequence into two sequences: the first is elements that return False for func(element) and the second for True for func(element). By default, func is bool, so it uses the truth value of the object.
def bisect(seq, func=bool):
    """
    Split a sequence into two sequences: the first is elements that return
    False for func(element) and the second for True for func(element).
    By default, func is ``bool``, so uses the truth value of the object.

    >>> is_odd = lambda n: n%2
    >>> even, odd = bisect(range(5), is_odd)
    >>> list(odd)
    [1, 3]
    >>> list(even)
    [0, 2, 4]
    """
    grouped = GroupbySaved(seq, func)
    return grouped.get_first_n_queues(2)
Take a sequence and break it up into chunks of the specified size. The last chunk may be smaller than size.
def grouper_nofill_str(n, iterable):
    """
    Take a sequence and break it up into chunks of the specified size.
    The last chunk may be smaller than size.

    This works very similar to grouper_nofill, except it works with strings
    as well.

    >>> tuple(grouper_nofill_str(3, 'foobarbaz'))
    ('foo', 'bar', 'baz')
    >>> tuple(grouper_nofill_str(3, list(range(10))))
    ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9])
    """
    chunks = more_itertools.chunked(iterable, n)
    if isinstance(iterable, six.string_types):
        # Re-join the character lists so string input yields substrings.
        chunks = (''.join(chunk) for chunk in chunks)
    return chunks
*Deprecated*: Use more_itertools.collapse instead.
def flatten(subject, test=None):
    """
    *Deprecated*: Use more_itertools.collapse instead.
    """
    warnings.warn(
        "Use more_itertools.collapse instead",
        DeprecationWarning,
        stacklevel=2,
    )
    return list(more_itertools.collapse(subject, base_type=(bytes,)))
Yield every other item from the iterable
def every_other(iterable):
    """
    Yield every other item from the iterable

    >>> ' '.join(every_other('abcdefg'))
    'a c e g'
    """
    it = iter(iterable)
    for item in it:
        yield item
        # Discard the next item; stop silently when exhausted.
        next(it, None)
Given an iterable with items that may come in as sequential duplicates, remove those duplicates.
def remove_duplicates(iterable, key=None):
    """
    Given an iterable with items that may come in as sequential duplicates,
    remove those duplicates.

    Unlike unique_justseen, this function does not remove triplicates.

    >>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc'))
    'a b c a b c a a b b c c b c b c'
    >>> ' '.join(remove_duplicates('aaaabbbbb'))
    'a a b b b'
    """
    groups = itertools.groupby(iterable, key)
    group_values = six.moves.map(operator.itemgetter(1), groups)
    # Dropping every other member of a run halves each duplicate run.
    halved = six.moves.map(every_other, group_values)
    return itertools.chain.from_iterable(halved)
Get the next value from an iterable but also return an iterable that will subsequently return that value and the rest of the original iterable.
def peek(iterable):
    """
    Get the next value from an iterable, but also return an iterable
    that will subsequently return that value and the rest of the
    original iterable.

    >>> l = iter([1,2,3])
    >>> val, l = peek(l)
    >>> val
    1
    >>> list(l)
    [1, 2, 3]
    """
    ahead, rest = itertools.tee(iterable)
    return next(ahead), rest
Like takewhile, but takes a peekable iterable and doesn't consume the non-matching item.
def takewhile_peek(predicate, iterable):
    """
    Like takewhile, but takes a peekable iterable and doesn't
    consume the non-matching item.

    >>> items = Peekable(range(10))
    >>> is_small = lambda n: n < 4
    >>> list(takewhile_peek(is_small, items))
    [0, 1, 2, 3]
    >>> list(items)
    [4, 5, 6, 7, 8, 9]
    """
    while True:
        # Peek before consuming, so the first non-matching item stays
        # in the underlying iterable.
        try:
            upcoming = predicate(iterable.peek())
        except StopIteration:
            return
        if not upcoming:
            return
        yield next(iterable)
Like pairwise, except returns n-tuples of adjacent items. s -> (s0, s1, ..., sn), (s1, s2, ..., s(n+1)), ...
def nwise(iter, n):
    """
    Like pairwise, except returns n-tuples of adjacent items.

    s -> (s0, s1, ..., sn), (s1, s2, ..., s(n+1)), ...
    """
    copies = [iter]
    while len(copies) < n:
        # Split off one more copy and advance it one step, producing
        # progressively shifted views of the stream.
        copies[-1:] = itertools.tee(copies[-1])
        next(copies[-1], None)
    return six.moves.zip(*copies)
Given an iterable, return a new iterable which yields triples of (pre, item, post), where pre and post are the items preceding and following the item (or None if no such item is appropriate). pre and post will always be pre_size and post_size in length.
def window(iter, pre_size=1, post_size=1):
    """
    Given an iterable, return a new iterable which yields triples of
    (pre, item, post), where pre and post are the items preceding and
    following the item (or None if no such item is appropriate). pre
    and post will always be pre_size and post_size in length.

    >>> example = window(range(10), pre_size=2)
    >>> next(example)
    ((None, None), 0, (1,))
    >>> next(example)
    ((None, 0), 1, (2,))
    >>> list(example)[-1]
    ((7, 8), 9, (None,))
    """
    # Left-pad a copy with Nones and group it into pre_size-tuples.
    pre_iter, iter = itertools.tee(iter)
    pre_iter = nwise(itertools.chain((None,) * pre_size, pre_iter), pre_size)
    # Right-pad a copy with Nones, group, and skip the first group so the
    # post window starts after the current item.
    post_iter, iter = itertools.tee(iter)
    post_iter = nwise(itertools.chain(post_iter, (None,) * post_size), post_size)
    next(post_iter, None)
    return six.moves.zip(pre_iter, iter, post_iter)
Given the total number of items determine the number of items that can be added to each bin with a limit on the bin size.
def partition_items(count, bin_size):
    """
    Given the total number of items, determine the number of items that
    can be added to each bin with a limit on the bin size.

    So if you want to partition 11 items into groups of 3, you'll want
    three of three and one of two.

    >>> partition_items(11, 3)
    [3, 3, 3, 2]

    But if you only have ten items, you'll have two groups of three and
    two of two.

    >>> partition_items(10, 3)
    [3, 3, 2, 2]
    """
    num_bins = int(math.ceil(count / float(bin_size)))
    if not num_bins:
        return []
    # Spread the items as evenly as possible: the first `extra` bins
    # each take one more than the rest.
    base, extra = divmod(count, num_bins)
    return [base + 1] * extra + [base] * (num_bins - extra)
Like grouper, but balance the rows to minimize fill per row. balanced_rows(3, 'ABCDEFG', 'x') --> ABC DEx FGx
def balanced_rows(n, iterable, fillvalue=None):
    """
    Like grouper, but balance the rows to minimize fill per row.

    balanced_rows(3, 'ABCDEFG', 'x') --> ABC DEx FGx"
    """
    iterable, counter = itertools.tee(iterable)
    count = len(tuple(counter))
    for allocation in partition_items(count, n):
        row = itertools.islice(iterable, allocation)
        if allocation < n:
            # BUG FIX: pad with as many fillvalues as the row is short.
            # The original appended exactly one, which produced short rows
            # whenever count < n - 1 (e.g. balanced_rows(3, 'A', 'x')
            # yielded ('A', 'x') instead of ('A', 'x', 'x')).
            row = itertools.chain(row, [fillvalue] * (n - allocation))
        yield tuple(row)
Given an object always return an iterable. If the item is not already iterable return a tuple containing only the item. If item is None an empty iterable is returned.
def always_iterable(item):
    """
    Given an object, always return an iterable. If the item is not
    already iterable, return a tuple containing only the item. If item
    is None, an empty iterable is returned.

    Although mappings are iterable, treat each like a singleton, as it's
    more like an object than a sequence.

    >>> next(always_iterable(dict(a=1)))
    {'a': 1}
    """
    # Strings and mappings are iterable but should be treated as single items.
    singleton_bases = six.text_type, bytes, collections.abc.Mapping
    return more_itertools.always_iterable(item, base_type=singleton_bases)
Call each callable in callables suppressing any exceptions supplied. If no exception classes are supplied all Exceptions will be suppressed.
def suppress_exceptions(callables, *exceptions):
    """
    Call each callable in callables, suppressing any exceptions supplied. If
    no exception classes are supplied, all Exceptions will be suppressed.

    >>> import functools
    >>> c1 = functools.partial(int, 'a')
    >>> c2 = functools.partial(int, '10')
    >>> list(suppress_exceptions((c1, c2)))
    [10]
    """
    if not exceptions:
        exceptions = (Exception,)
    for fn in callables:
        try:
            yield fn()
        except exceptions:
            pass
Yield duplicate items from any number of sorted iterables of items
def duplicates(*iterables, **kwargs):
    """
    Yield duplicate items from any number of sorted iterables of items

    >>> items_a = [1, 2, 3]
    >>> items_b = [0, 3, 4, 5, 6]
    >>> list(duplicates(items_a, items_b))
    [(3, 3)]

    It won't behave as you expect if the iterables aren't ordered.

    This function is most interesting when it's operating on a key of
    more complex objects, e.g. ``key=operator.itemgetter('email')``.
    """
    import heapq
    key = kwargs.pop('key', lambda x: x)
    assert not kwargs
    try:
        zipped = more_itertools.collate(*iterables, key=key)
    except (AttributeError, NameError):
        # BUG FIX: more_itertools.collate() was removed in more_itertools 10;
        # heapq.merge is the stdlib equivalent for merging sorted iterables.
        zipped = heapq.merge(*iterables, key=key)
    grouped = itertools.groupby(zipped, key=key)
    groups = (tuple(g) for k, g in grouped)

    def has_dupes(group):
        return len(group) > 1
    return filter(has_dupes, groups)
Assert that for all items in the iterable, they're in order based on comp.
def assert_ordered(iterable, key=lambda x: x, comp=operator.le):
    """
    Assert that for all items in the iterable, they're in order based on comp

    >>> list(assert_ordered(range(5)))
    [0, 1, 2, 3, 4]
    >>> list(assert_ordered(range(5), comp=operator.ge))
    Traceback (most recent call last):
    ...
    AssertionError: 0 < 1
    >>> list(assert_ordered(range(5, 0, -1), key=operator.neg))
    [5, 4, 3, 2, 1]
    """
    err_tmpl = (
        "{pair[0]} > {pair[1]}" if comp is operator.le else
        "{pair[0]} < {pair[1]}" if comp is operator.ge else
        "not {comp} {pair}"
    )
    # Pairwise via tee: (x0, x1), (x1, x2), ...
    first, second = itertools.tee(iterable)
    next(second, None)
    for pair in zip(first, second):
        keyed = tuple(map(key, pair))
        assert comp(*keyed), err_tmpl.format(**locals())
        yield pair[0]
    yield pair[1]
Given revision sets old and new each containing a series of revisions of some set of objects collate them based on these rules:
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
    """
    Given revision sets old and new, each containing a series of revisions of
    some set of objects, collate them based on these rules:

    - all items from each set are yielded in stable order
    - items in old are yielded first
    - items in new are yielded last
    - items that match are yielded in the order in which they appear,
      giving preference to new

    Items match based on the 'key' parameter (identity by default). Items are
    merged using the 'merge' function, which accepts the old and new items to
    be merged (returning new by default).

    This algorithm requires fully materializing both old and new in memory.

    >>> list(collate_revs(['a', 'b', 'c'], ['a', 'd', 'c']))
    ['a', 'b', 'd', 'c']
    >>> list(collate_revs(['a', 'b', 'c'], ['d']))
    ['a', 'b', 'c', 'd']
    >>> list(collate_revs(['b', 'a'], ['a', 'b']))
    ['a', 'b']
    >>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
    ['a', 'b', 'c']
    """
    missing = object()

    def maybe_merge(*items):
        """
        Merge any non-null items
        """
        present = (item for item in items if item is not missing)
        return functools.reduce(merge, present)

    new_items = collections.OrderedDict((key(el), el) for el in new)
    old_items = collections.OrderedDict((key(el), el) for el in old)

    # Use the old_items as a reference, weaving in matching new items.
    for old_key, old_item in _mutable_iter(old_items):
        if old_key not in new_items:
            yield old_item
            continue

        # Yield all new items that appear before the matching key,
        # merging each with a same-keyed old item when one exists.
        before, match_new, new_items = _swap_on_miss(
            partition_dict(new_items, old_key))
        for new_key, new_item in before.items():
            yield maybe_merge(new_item, old_items.pop(new_key, missing))
        yield merge(old_item, match_new)

    # Finally, yield whatever is leftover in new.
    for leftover in new_items.values():
        yield leftover
Iterate over items in the dict, yielding the first one, but allowing it to be mutated during the process. >>> d = dict(a=1) >>> it = _mutable_iter(d) >>> next(it) ('a', 1) >>> d {} >>> d.update(b=2) >>> list(it) [('b', 2)]
def _mutable_iter(dict): """ Iterate over items in the dict, yielding the first one, but allowing it to be mutated during the process. >>> d = dict(a=1) >>> it = _mutable_iter(d) >>> next(it) ('a', 1) >>> d {} >>> d.update(b=2) >>> list(it) [('b', 2)] """ while dict: prev_key = next(iter(dict)) yield prev_key, dict.pop(prev_key)
Given a partition_dict result if the partition missed swap the before and after.
def _swap_on_miss(partition_result): """ Given a partition_dict result, if the partition missed, swap the before and after. """ before, item, after = partition_result return (before, item, after) if item else (after, item, before)
Given an ordered dictionary of items and a key in that dict return an ordered dict of items before the keyed item and an ordered dict of items after.
def partition_dict(items, key):
    """
    Given an ordered dictionary of items and a key in that dict,
    return an ordered dict of items before, the keyed item, and
    an ordered dict of items after.

    >>> od = collections.OrderedDict(zip(range(5), 'abcde'))
    >>> before, item, after = partition_dict(od, 3)
    >>> item
    'd'

    Like string.partition, if the key is not found in the items,
    the before will contain all items, item will be None, and
    after will be an empty iterable.
    """
    def not_the_key(pair):
        return pair[0] != key

    item = items.get(key)
    pairs = iter(items.items())
    # takewhile consumes the matching pair itself, so `after` starts
    # just past the keyed item.
    before = collections.OrderedDict(itertools.takewhile(not_the_key, pairs))
    after = collections.OrderedDict(pairs)
    return before, item, after
Run through the sequence until n queues are created and return them. If fewer are created return those plus empty iterables to compensate.
def get_first_n_queues(self, n):
    """
    Run through the sequence until n queues are created and return
    them. If fewer are created, return those plus empty iterables to
    compensate.
    """
    try:
        while len(self.queues) < n:
            self.__fetch__()
    except StopIteration:
        pass
    queues = list(self.queues.values())
    # Pad with empty iterators when the source produced fewer groups.
    shortfall = n - len(queues)
    queues.extend(iter([]) for _ in range(shortfall))
    return queues
Resets the iterator to the start.
def reset(self):
    """
    Resets the iterator to the start.

    Any remaining values in the current iteration are discarded.
    """
    # tee the saved stream: one copy becomes the fresh iterator, the
    # other replaces the saved copy for the next reset.
    self.__iterator, self.__saved = itertools.tee(self.__saved)
Parse the remainder of the token, to find an "as varname" statement.
def parse_as_var(parser, token):
    """
    Parse the remainder of the token, to find a "as varname" statement.

    :param parser: The "parser" object that ``@register.tag`` provides.
    :type parser: :class:`~django.template.Parser`
    :param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or splitted bits
    """
    bits = token.split_contents() if isinstance(token, Token) else token

    as_var = None
    if len(bits) > 2 and bits[-2] == 'as':
        # Work on a copy so the caller's list is not mutated.
        bits = bits[:]
        as_var = bits.pop()
        bits.pop()  # drop the 'as' keyword itself

    return bits, as_var
Allow the template tag arguments to be like a normal Python function with * args and ** kwargs.
def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True):
    """
    Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.

    :param parser: The "parser" object that ``@register.tag`` provides.
    :type parser: :class:`~django.template.Parser`
    :param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or splitted bits
    :param compile_args: Whether the arguments should be compiled using
        :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
    :param compile_kwargs: Whether the keyword arguments should be compiled using
        :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
    :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check.
    :type allowed_kwargs: tuple
    :return: The tag name, arguments and keyword arguments.
    :rtype: tuple(tag_name, args, kwargs)
    """
    if isinstance(token, Token):
        bits = token.split_contents()
    else:
        bits = token

    expect_kwarg = False
    args = []
    kwargs = {}
    prev_bit = None
    tag_name = bits[0]

    for bit in bits[1:]:
        kwarg_match = kwarg_re.match(bit)
        if kwarg_match:
            # Keyword argument.
            expect_kwarg = True
            # BUG FIX: maxsplit=1 (was 2) so values containing '=' don't
            # make the 2-tuple unpacking fail.
            (name, expr) = bit.split('=', 1)
            kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
        else:
            # Still at positioned arguments.
            if expect_kwarg:
                raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit))
            args.append(parser.compile_filter(bit) if compile_args else bit)
        prev_bit = bit

    # Validate the allowed arguments, to make things easier for template developers
    if allowed_kwargs is not None and kwargs:
        if not allowed_kwargs:
            # BUG FIX: the original raised this message with unfilled %s
            # placeholders (no format arguments were supplied).
            first_kwarg = next(iter(kwargs))
            raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed." % (first_kwarg, tag_name))
        for name in kwargs:
            if name not in allowed_kwargs:
                raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs)))

    return tag_name, args, kwargs
Decorator to register class tags
def template_tag(library, name):
    """Register a class-based template tag with *library* under *name*.

    :param library: The template tag library, typically instantiated as
        ``register = Library()``.
    :type library: :class:`~django.template.Library`
    :param name: The name of the template tag
    :type name: str

    Example:

    .. code-block:: python

        @template_tag(register, 'my_tag')
        class MyTag(BaseNode):
            pass
    """
    def decorator(cls):
        # Prefer an explicit ``parse`` attribute; otherwise assume the class
        # itself is callable as ``cls(parser, token)``.
        tag_compiler = cls.parse if hasattr(cls, 'parse') else cls
        library.tag(name, tag_compiler)
        # Return the class unchanged so it stays importable from its module.
        return cls
    return decorator
A descendant is a child many steps down.
def descendant(self, chain_path):
    """
    A descendant is a child many steps down.

    The hex string *chain_path* is consumed 4 bytes (8 hex characters) at
    a time; each chunk, reduced modulo 2**31, selects one child step.
    """
    step_hex_len = 4 * 2  # 4 bytes per step, 2 hex chars per byte
    non_hardened_limit = 2 ** 31
    steps = [
        int(chain_path[pos:pos + step_hex_len], 16) % non_hardened_limit
        for pos in range(0, len(chain_path), step_hex_len)
    ]
    node = self.hdkeychain
    for step in steps:
        node = node.get_child(step)
    return PublicKeychain(node)
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin
def bip32_serialize(rawtuple):
    """
    Serialize a raw BIP32 key tuple into its Base58Check string form.

    Derived from code from pybitcointools
    (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin

    :param rawtuple: (vbytes, depth, fingerprint, i, chaincode, key)
    :return: Base58-encoded extended key string.
    """
    vbytes, depth, fingerprint, i, chaincode, key = rawtuple
    # Child index as a 4-byte big-endian value.
    i = encode(i, 256, 4)
    # NOTE(review): upstream pybitcointools serializes the chain code bytes
    # directly; the hash_to_int round-trip here presumably normalizes the
    # input representation -- confirm against callers.
    chaincode = encode(hash_to_int(chaincode), 256, 32)
    # Private keys: 0x00 pad byte plus the key minus its trailing 0x01
    # compression marker. Public keys are stored as-is.
    keydata = b'\x00' +key[:-1] if vbytes in PRIVATE else key
    bindata = vbytes + from_int_to_byte(depth % 256) + fingerprint + i + chaincode + keydata
    # Append the 4-byte double-SHA256 checksum, then Base58-encode.
    return changebase(bindata + bin_dbl_sha256(bindata)[:4], 256, 58)
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin
def bip32_deserialize(data):
    """
    Parse a Base58Check-encoded BIP32 extended key into its raw tuple.

    Derived from code from pybitcointools
    (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin

    :param data: Base58-encoded extended key string.
    :return: (vbytes, depth, fingerprint, i, chaincode, key)
    :raises Exception: if the trailing 4-byte checksum does not match.
    """
    dbin = changebase(data, 58, 256)
    # Verify the Base58Check checksum (first 4 bytes of double SHA-256).
    if bin_dbl_sha256(dbin[:-4])[:4] != dbin[-4:]:
        raise Exception("Invalid checksum")
    vbytes = dbin[0:4]           # version bytes (network / key type)
    depth = from_byte_to_int(dbin[4])
    fingerprint = dbin[5:9]      # parent key fingerprint
    i = decode(dbin[9:13], 256)  # child index
    chaincode = dbin[13:45]
    # Private keys: skip the 0x00 pad byte and re-append the 0x01
    # compression marker; public keys take all 33 key bytes.
    key = dbin[46:78]+b'\x01' if vbytes in PRIVATE else dbin[45:78]
    return (vbytes, depth, fingerprint, i, chaincode, key)
Return a list of table names in the database.
def fetch_table_names(self, include_system_table=False):
    """
    :param include_system_table: When True, SQLite's internal tables are
        included in the result.
    :return: List of table names in the database.
    :rtype: list
    """
    result = self.__cur.execute("SELECT name FROM sqlite_master WHERE TYPE='table'")
    if result is None:
        return []

    names = [record[0] for record in result.fetchall()]
    if include_system_table:
        return names
    # Drop SQLite's internal bookkeeping tables.
    return [name for name in names if name not in SQLITE_SYSTEM_TABLES]
Get sqlite_master table information as a list of dictionaries.
def fetch_sqlite_master(self):
    """
    Get sqlite_master table information as a list of dictionaries,
    one per sqlite_master row, keyed by the attribute names in
    ``_SQLITE_MASTER_ATTR_NAME_LIST`` (e.g. tbl_name, sql, type, name,
    rootpage).

    :return: sqlite_master table information.
    :rtype: list
    """
    attr_names = self._SQLITE_MASTER_ATTR_NAME_LIST
    query = "SELECT {:s} FROM sqlite_master".format(", ".join(attr_names))
    result = self.__cur.execute(query)
    return [dict(zip(attr_names, record)) for record in result.fetchall()]
Yields each node of object graph in postorder.
def object_iter(obj, parent=None, parent_key=None, idx=None, siblings=None):
    """Yields each node of object graph in postorder.

    Lists recurse per element (1-based ``idx`` with sibling count);
    mappings recurse per key; the wrapping Node itself is yielded last.
    """
    current = Node(value=obj, parent=parent, parent_key=parent_key,
                   siblings=siblings, idx=idx)

    if isinstance(obj, list):
        sibling_count = len(obj)
        for position, item in enumerate(obj, start=1):
            for descendant in object_iter(item, current, None,
                                          position, sibling_count):
                yield descendant
    elif isinstance(obj, collections.Mapping):
        for key in obj:
            for descendant in object_iter(obj[key], current, key):
                yield descendant

    # Postorder: children first, then this node.
    yield current
Apply selector to obj and return matching nodes.
def select(selector, obj):
    """Apply selector to obj and return matching nodes.

    If only one node is found, return it, otherwise return a list of
    matches. Returns False on syntax error. None if no results found.
    """
    parser = Parser(obj)
    try:
        return parser.parse(selector)
    except SelectorSyntaxError as err:
        # Log the bad selector but do not propagate; callers check for False.
        log.exception(err)
        return False
Accept a list of tokens. Returns matched nodes of self.obj.
def parse(self, selector):
    """Accept a selector string; tokenize it and return matched nodes of self.obj."""
    log.debug(self.obj)
    tokens = lex(selector)

    if self.peek(tokens, 'operator') == '*':
        # Bare '*' matches every node in the graph.
        self.match(tokens, 'operator')
        matched = list(object_iter(self.obj))
    else:
        matched = self.selector_production(tokens)

    values = [node.value for node in matched]

    # Single results should be returned as a primitive.
    if len(values) == 1:
        return values[0]
    if not values:
        return None
    return values
Production for a full selector.
def selector_production(self, tokens):
    """Production for a full selector.

    Consumes tokens for one simple selector (type, key, pseudo-class,
    nth-function, pseudo-class function), matches nodes of ``self.obj``
    against the combined predicates, then recurses on any trailing
    combinator (``,``, ``>``, ``~`` or descendant).

    :param tokens: token list (consumed in place).
    :return: list of matching nodes.
    :raises SelectorSyntaxError: on an empty selector or unknown operator.
    """
    validators = []
    # the following productions should return predicate functions.
    if self.peek(tokens, 'type'):
        type_ = self.match(tokens, 'type')
        validators.append(self.type_production(type_))

    if self.peek(tokens, 'identifier'):
        key = self.match(tokens, 'identifier')
        validators.append(self.key_production(key))

    if self.peek(tokens, 'pclass'):
        pclass = self.match(tokens, 'pclass')
        validators.append(self.pclass_production(pclass))

    if self.peek(tokens, 'nth_func'):
        nth_func = self.match(tokens, 'nth_func')
        validators.append(self.nth_child_production(nth_func, tokens))

    if self.peek(tokens, 'pclass_func'):
        pclass_func = self.match(tokens, 'pclass_func')
        validators.append(self.pclass_func_production(pclass_func, tokens))

    if not len(validators):
        raise SelectorSyntaxError('no selector recognized.')

    # apply validators from a selector expression to self.obj
    results = self._match_nodes(validators, self.obj)

    if self.peek(tokens, 'operator'):
        operator = self.match(tokens, 'operator')
        rvals = self.selector_production(tokens)

        if operator == ',':
            # Union of both selectors.
            results.extend(rvals)
        elif operator == '>':
            results = self.parents(results, rvals)
        elif operator == '~':
            results = self.siblings(results, rvals)
        elif operator == ' ':
            results = self.ancestors(results, rvals)
        else:
            raise SelectorSyntaxError("unrecognized operator '%s'" % operator)
    else:
        if len(tokens):
            # Remaining tokens with no explicit operator: implicit
            # descendant combinator between the two selectors.
            rvals = self.selector_production(tokens)
            results = self.ancestors(results, rvals)

    return results
Find nodes in rhs which have parents in lhs.
def parents(self, lhs, rhs):
    """Find nodes in rhs which have parents in lhs."""
    matched = []
    for candidate in rhs:
        if candidate.parent in lhs:
            matched.append(candidate)
    return matched
Return nodes from rhs which have ancestors in lhs.
def ancestors(self, lhs, rhs):
    """Return nodes from rhs which have ancestors in lhs."""
    def has_ancestor_in_lhs(node):
        # Walk up the parent chain; a node counts as its own ancestor
        # (matching the recursive original).
        while node:
            if node in lhs:
                return True
            node = node.parent
        return False

    return [node for node in rhs if has_ancestor_in_lhs(node)]
Find nodes in rhs having common parents in lhs.
def siblings(self, lhs, rhs):
    """Find nodes in rhs having common parents in lhs."""
    lhs_parents = [node.parent for node in lhs]
    return [candidate for candidate in rhs if candidate.parent in lhs_parents]
Parse args and pass them to pclass_func_validator.
def nth_child_production(self, lexeme, tokens):
    """Parse args and pass them to pclass_func_validator.

    Builds a validator for ``:nth-child(an+b)`` / ``:nth-last-child(...)``
    arguments, including the ``odd`` / ``even`` keywords and plain indexes.

    :param lexeme: either ``'nth-child'`` or ``'nth-last-child'``.
    :param tokens: token stream; one ``expr`` token is consumed.
    :return: predicate function accepting a node.
    """
    args = self.match(tokens, 'expr')
    pat = self.nth_child_pat.match(args)

    if pat.group(5):
        # 'odd' / 'even' keyword: step 2, offset 1 or 0.
        a = 2
        b = 1 if pat.group(5) == 'odd' else 0
    elif pat.group(6):
        # Plain integer index.
        a = 0
        b = int(pat.group(6))
    else:
        # General an+b form.
        sign = pat.group(1) if pat.group(1) else '+'
        coef = pat.group(2) if pat.group(2) else '1'
        # HACK: eval of sign/coefficient strings; input is constrained by
        # nth_child_pat, so presumably arbitrary code cannot reach here --
        # confirm the pattern only admits signed integers.
        a = eval(sign + coef)
        b = eval(pat.group(3) + pat.group(4)) if pat.group(3) else 0

    reverse = False
    if lexeme == 'nth-last-child':
        reverse = True

    def validate(node):
        """This crazy function taken from jsonselect.js:444."""
        if not node.siblings:
            return False
        # idx: 1-based position among siblings, counted from the end
        # when handling nth-last-child.
        idx = node.idx - 1
        tot = node.siblings

        if reverse:
            idx = tot - idx
        else:
            idx += 1

        if a == 0:
            m = b == idx
        else:
            mod = (idx - b) % a
            m = not mod and (idx * a + b) >= 0
        return m

    return validate
Apply each validator in validators to each node in obj.
def _match_nodes(self, validators, obj):
    """Apply each validator in validators to each node in obj.

    Return each node in obj which matches all validators.
    """
    return [
        node for node in object_iter(obj)
        if all(validate(node) for validate in validators)
    ]
Sends ICMP echo requests to destination dst count times. Returns a deferred which fires when responses are finished.
def ping(dst, count, inter=0.2, maxwait=1000, size=64):
    """Sends ICMP echo requests to destination `dst` `count` times.

    Returns a deferred which fires when responses are finished; the
    listening ICMP port is torn down before the result is passed on.
    """
    finished = defer.Deferred()
    port = ICMPPort(0, ICMPPing(finished, dst, count, inter, maxwait, size),
                    "", 8192, reactor)
    port.startListening()

    def _cleanup(result, listening_port):
        # Stop listening once all responses are in, then forward the result.
        listening_port.stopListening()
        return result

    return finished.addCallback(_cleanup, port)
Make an HTTP request and return the body
def getBody(self, url, method='GET', headers=None, data=None, socket=None):
    """Make an HTTP request and return the body.

    :param url: URL to request.
    :param method: HTTP method, defaults to ``'GET'``.
    :param headers: Optional header dict; a default User-Agent header is
        added when the caller does not supply one.
    :param data: Optional request body.
    :param socket: Optional socket/endpoint passed through to ``request``.
    """
    # BUG FIX: the mutable default `headers={}` was shared between calls
    # and mutated in place; use None and build a fresh dict per call.
    if headers is None:
        headers = {}
    if 'User-Agent' not in headers:
        headers['User-Agent'] = ['Tensor HTTP checker']
    return self.request(url, method, headers, data, socket)
Expire any items in the cache older than age seconds
def expire(self, age):
    """Expire any items in the cache older than `age` seconds"""
    cutoff = time.time() - age
    cache = self._acquire_cache()
    stale = [key for key, value in cache.items() if value[0] < cutoff]

    for key in stale:
        # Remove from both the timestamp cache and the value store.
        cache.pop(key, None)
        self.store.pop(key, None)

    self._write_cache(cache)
Set a key k to value v
def set(self, k, v):
    """Set a key `k` to value `v`"""
    # Store the value with its write timestamp, then flush to disk.
    timestamped = (time.time(), v)
    self.store[k] = timestamped
    self._persist()
Returns key contents and modify time
def get(self, k):
    """Returns key contents, and modify time"""
    # Reload the backing store if the underlying file changed.
    if self._changed():
        self._read()
    if k not in self.store:
        return None
    return tuple(self.store[k])
Return True if key k exists
def contains(self, k):
    """Return True if key `k` exists.

    Reloads the backing store first when the underlying file changed.
    """
    if self._changed():
        self._read()
    # Test membership on the dict directly -- `k in d.keys()` built an
    # unnecessary view and is unidiomatic.
    return k in self.store
Given a record timestamp verify the chain integrity.
def chain_check(cls, timestamp: int) -> bool:
    """
    Given a record timestamp, verify the chain integrity.

    :param timestamp: UNIX time / POSIX time / Epoch time
    :return: 'True' if the timestamp fits the chain. 'False' otherwise.
    """
    record = cls.get_record(timestamp)

    if isinstance(record, NistBeaconValue) is False:
        # No valid record could be loaded for this timestamp.
        return False

    prev_record = cls.get_previous(record.timestamp)
    next_record = cls.get_next(record.timestamp)

    if prev_record is None and next_record is None:
        # A record with no neighbours at all cannot be chained.
        return False

    if (
            isinstance(prev_record, NistBeaconValue) and
            isinstance(next_record, NistBeaconValue)
    ):
        # Majority case, somewhere in the middle of the chain.
        # True if:
        #   - All three records have proper signatures
        #   - The requested record's previous output equals previous
        #   - The next record's previous output equals the record
        return (
            record.valid_signature and
            prev_record.valid_signature and
            next_record.valid_signature and
            record.previous_output_value == prev_record.output_value and
            next_record.previous_output_value == record.output_value
        )

    if (
            prev_record is None and
            isinstance(next_record, NistBeaconValue)
    ):
        # Edge case: this was potentially the first record of all time.
        return (
            record.valid_signature and
            next_record.valid_signature and
            cls._INIT_RECORD == record and
            next_record.previous_output_value == record.output_value
        )

    if (
            isinstance(prev_record, NistBeaconValue) and
            next_record is None
    ):
        # Edge case: this was potentially the latest record.
        return (
            record.valid_signature and
            prev_record.valid_signature and
            record.previous_output_value == prev_record.output_value
        )

    # BUG FIX: previously fell through returning None (not a bool) when a
    # neighbour was present but not a NistBeaconValue; fail explicitly so
    # the declared -> bool contract holds.
    return False
Get the first ( oldest ) record available. Since the first record IS a known value in the system we can load it from constants.
def get_first_record(
        cls,
        download: bool=True
) -> NistBeaconValue:
    """
    Get the first (oldest) record available. Since the first record
    IS a known value in the system we can load it from constants.

    :param download: 'True' will always reach out to NIST to get the
        first record. 'False' returns a local copy.
    :return: The first beacon value. 'None' otherwise.
    """
    if not download:
        # Rebuild the known genesis record from its stored JSON copy.
        return NistBeaconValue.from_json(cls._INIT_RECORD.json)
    return NistBeacon.get_record(cls._INIT_RECORD.timestamp)
Convert a string of JSON which represents a NIST randomness beacon value into a NistBeaconValue object.
def from_json(cls, input_json: str) -> 'NistBeaconValue':
    """
    Convert a string of JSON which represents a NIST randomness beacon
    value into a 'NistBeaconValue' object.

    :param input_json: JSON to build a 'Nist RandomnessBeaconValue' from
    :return: A 'NistBeaconValue' object, 'None' otherwise
    """
    try:
        parsed = json.loads(input_json)
    except ValueError:
        return None

    # Every one of these keys must be present and non-None.
    required_keys = (
        cls._KEY_FREQUENCY,
        cls._KEY_OUTPUT_VALUE,
        cls._KEY_PREVIOUS_OUTPUT_VALUE,
        cls._KEY_SEED_VALUE,
        cls._KEY_SIGNATURE_VALUE,
        cls._KEY_STATUS_CODE,
        cls._KEY_TIMESTAMP,
        cls._KEY_VERSION,
    )
    values = {key: parsed.get(key) for key in required_keys}
    if None in values.values():
        return None

    return cls(
        version=values[cls._KEY_VERSION],
        frequency=int(values[cls._KEY_FREQUENCY]),
        timestamp=int(values[cls._KEY_TIMESTAMP]),
        seed_value=values[cls._KEY_SEED_VALUE],
        previous_output_value=values[cls._KEY_PREVIOUS_OUTPUT_VALUE],
        signature_value=values[cls._KEY_SIGNATURE_VALUE],
        output_value=values[cls._KEY_OUTPUT_VALUE],
        status_code=values[cls._KEY_STATUS_CODE],
    )
Convert a string of XML which represents a NIST Randomness Beacon value into a NistBeaconValue object.
def from_xml(cls, input_xml: str) -> 'NistBeaconValue':
    """
    Convert a string of XML which represents a NIST Randomness Beacon value
    into a 'NistBeaconValue' object.

    :param input_xml: XML to build a 'NistBeaconValue' from
    :return: A 'NistBeaconValue' object, 'None' otherwise
    """
    invalid_result = None
    # Namespace map for the record formats this parser understands.
    understood_namespaces = {
        'nist-0.1': 'http://beacon.nist.gov/record/0.1/',
    }

    # Our required values are "must haves". This makes it simple
    # to verify we loaded everything out of XML correctly.
    required_values = {
        cls._KEY_FREQUENCY: None,
        cls._KEY_OUTPUT_VALUE: None,
        cls._KEY_PREVIOUS_OUTPUT_VALUE: None,
        cls._KEY_SEED_VALUE: None,
        cls._KEY_SIGNATURE_VALUE: None,
        cls._KEY_STATUS_CODE: None,
        cls._KEY_TIMESTAMP: None,
        cls._KEY_VERSION: None,
    }

    # First attempt to load the xml, return 'None' on ParseError
    try:
        tree = ElementTree.ElementTree(ElementTree.fromstring(input_xml))
    except ElementTree.ParseError:
        return invalid_result

    # Using the required values, let's load the xml values in
    for key in required_values:
        discovered_element = tree.find(
            "{0}:{1}".format('nist-0.1', key),
            namespaces=understood_namespaces,
        )

        if not isinstance(discovered_element, ElementTree.Element):
            # Element missing from the document; leave the slot as None so
            # the completeness check below rejects the record.
            continue

        # Bad pylint message - https://github.com/PyCQA/pylint/issues/476
        # pylint: disable=no-member
        required_values[key] = discovered_element.text

    # Confirm that the required values are set, and not 'None'
    if None in required_values.values():
        return invalid_result

    # We have all the required values, return a node object
    return cls(
        version=required_values[cls._KEY_VERSION],
        frequency=int(required_values[cls._KEY_FREQUENCY]),
        timestamp=int(required_values[cls._KEY_TIMESTAMP]),
        seed_value=required_values[cls._KEY_SEED_VALUE],
        previous_output_value=required_values[
            cls._KEY_PREVIOUS_OUTPUT_VALUE
        ],
        signature_value=required_values[cls._KEY_SIGNATURE_VALUE],
        output_value=required_values[cls._KEY_OUTPUT_VALUE],
        status_code=required_values[cls._KEY_STATUS_CODE],
    )
Returns a minified version of the javascript content
def rendered_content(self):
    """Returns a 'minified' version of the javascript content.

    If the resolved template is already a pre-minified asset (its name
    ends in ``.min``) the rendered content is returned untouched;
    otherwise the rendered output is run through ``jsmin``.
    """
    template = self.resolve_template(self.template_name)

    # BUG FIX: comparing only the minor version (django.VERSION[1] < 8)
    # misclassifies Django 2.0+ (minor version 0) as pre-1.8. Compare the
    # full version tuple instead.
    if django.VERSION < (1, 8):
        template_name = template.name
    else:
        template_name = template.template.name

    if template_name.endswith('.min'):
        return super(MinifiedJsTemplateResponse, self).rendered_content

    # No minified template exists, so minify the rendered response.
    content = super(MinifiedJsTemplateResponse, self).rendered_content
    return jsmin.jsmin(content)
Passes each parsed log line to fn This is a better idea than storing a giant log file in memory
def get_fn(self, fn, max_lines=None):
    """Passes each parsed log line to `fn`

    This is a better idea than storing a giant log file in memory

    :param fn: callable invoked once per complete (optionally parsed) line.
    :param max_lines: stop after this many lines in a single pass.
        NOTE(review): when the limit is reached this returns None rather
        than [] -- confirm callers do not rely on the list return value.
    """
    stat = os.stat(self.logfile)

    if (stat.st_ino == self.lastInode) and (stat.st_size == self.lastSize):
        # Nothing new
        return []

    # Handle rollover and rotations vaguely
    if (stat.st_ino != self.lastInode) or (stat.st_size < self.lastSize):
        # File was replaced or truncated: restart from the beginning.
        self.lastSize = 0

    fi = open(self.logfile, 'rt')
    # Resume reading where the previous pass stopped.
    fi.seek(self.lastSize)

    self.lastInode = stat.st_ino

    lines = 0
    for i in fi:
        lines += 1
        if max_lines and (lines > max_lines):
            self.storeLast()
            fi.close()
            return

        if '\n' in i:
            # Only advance past complete lines; a partial trailing line
            # is re-read on the next pass.
            self.lastSize += len(i)
            if self.parser:
                line = self.parser(i.strip('\n'))
            else:
                line = i.strip('\n')
            fn(line)

    self.storeLast()
    fi.close()
Returns a big list of all log lines since the last run
def get(self, max_lines=None):
    """Returns a big list of all log lines since the last run"""
    collected = []
    # Bound method append is the collection callback.
    self.get_fn(collected.append, max_lines=max_lines)
    return collected
Create a token referencing the object id with extra data.
def create_token(self, obj_id, extra_data):
    """Create a token referencing the object id with extra data.

    Note random data is added to ensure that no two tokens are identical.
    """
    payload = {
        'id': obj_id,
        'data': extra_data,
        # 4 random bytes -> 8 hex chars of salt per token.
        'rnd': binascii.hexlify(os.urandom(4)).decode('utf-8'),
    }
    return self.dumps(payload)
Validate secret link token.
def validate_token(self, token, expected_data=None):
    """Validate secret link token.

    :param token: Token value.
    :param expected_data: A dictionary of key/values that must be present
        in the data part of the token (i.e. included via ``extra_data``
        in ``create_token``).
    """
    try:
        # Load token (random salt is removed by load_token).
        data = self.load_token(token)
    except BadData:
        return None

    if expected_data:
        token_data = data["data"]
        # Every expected key/value must match the token payload.
        for key, value in expected_data.items():
            if value != token_data.get(key):
                return None
    return data
Load data in a token.
def load_token(self, token, force=False):
    """Load data in a token.

    :param token: Token to load.
    :param force: Load token data even if signature expired.
                  Default: False.
    """
    try:
        data = self.loads(token)
    except SignatureExpired as err:
        if not force:
            raise
        # Expired but forced: recover the payload carried on the error.
        data = err.payload

    # Strip the per-token random salt before returning.
    data.pop("rnd")
    return data
Get cryptographic engine.
def engine(self):
    """Get cryptographic engine."""
    if not hasattr(self, '_engine'):
        from cryptography.fernet import Fernet
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes

        # Derive a urlsafe-base64 Fernet key from the application
        # SECRET_KEY via SHA-256 (Fernet requires exactly 32 key bytes).
        secret_digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
        secret_digest.update(current_app.config['SECRET_KEY'].encode('utf8'))
        self._engine = Fernet(urlsafe_b64encode(secret_digest.finalize()))
    return self._engine
Create a token referencing the object id with extra data.
def create_token(self, obj_id, extra_data):
    """Create an encrypted token referencing the object id with extra data."""
    plain_token = super(EncryptedTokenMixIn, self).create_token(
        obj_id, extra_data)
    return self.engine.encrypt(plain_token)
Load data in a token.
def load_token(self, token, force=False):
    """Load data in a token.

    :param token: Token to load.
    :param force: Load token data even if signature expired.
                  Default: False.
    """
    # Decrypt first, then delegate validation/parsing to the base class.
    decrypted = self.engine.decrypt(token)
    return super(EncryptedTokenMixIn, self).load_token(decrypted, force=force)
Multiple algorithm - compatible token validation.
def compat_validate_token(cls, *args, **kwargs):
    """Multiple algorithm-compatible token validation."""
    # Try each supported digest algorithm; first successful validation wins.
    for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
        data = cls(algorithm_name=algorithm).validate_token(*args, **kwargs)
        if data:
            return data
    return None
Create the secret link token.
def create_token(cls, obj_id, data, expires_at=None):
    """Create the secret link token."""
    # An expiry date selects the timed serializer variant.
    serializer = (
        TimedSecretLinkSerializer(expires_at=expires_at)
        if expires_at else SecretLinkSerializer()
    )
    return serializer.create_token(obj_id, data)
Validate a secret link token ( non - expiring + expiring ).
def validate_token(cls, token, expected_data=None):
    """Validate a secret link token (non-expiring + expiring).

    Tries each supported digest algorithm with both the plain and the
    timed serializer; returns the first successful payload, implicitly
    None when nothing validates.

    :param token: Token value.
    :param expected_data: key/values that must appear in the token data.
    :raises SignatureExpired: when the token parsed but has expired.
    """
    for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
        s = SecretLinkSerializer(algorithm_name=algorithm)
        st = TimedSecretLinkSerializer(algorithm_name=algorithm)
        try:
            # Try both serializer flavours for this algorithm.
            for serializer in (s, st):
                data = serializer.validate_token(
                    token, expected_data=expected_data)
                if data:
                    return data
        except SignatureExpired:
            # NOTE(review): an earlier comment said "move to next
            # algorithm", but the code re-raises, aborting the loop so
            # expired tokens are reported to the caller -- confirm intent.
            raise
        except BadData:
            # Token not produced with this algorithm; try the next one.
            continue
Validate a secret link token ( non - expiring + expiring ).
def load_token(cls, token, force=False):
    """Validate a secret link token (non-expiring + expiring)."""
    for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
        serializers = (
            SecretLinkSerializer(algorithm_name=algorithm),
            TimedSecretLinkSerializer(algorithm_name=algorithm),
        )
        for serializer in serializers:
            try:
                data = serializer.load_token(token, force=force)
            except SignatureExpired:
                # Token parsed but is expired -- surface to the caller.
                raise
            except BadData:
                # Wrong algorithm/serializer; try the next combination.
                continue
            if data:
                return data
32bit counter aggregator with wrapping
def Counter32(a, b, delta):
    """32bit counter aggregator with wrapping
    """
    wrap_limit = 4294967295
    if b < a:
        # Counter wrapped: distance to the 32-bit ceiling plus new value.
        return (wrap_limit - a + b) / float(delta)
    return (b - a) / float(delta)
64bit counter aggregator with wrapping
def Counter64(a, b, delta):
    """64bit counter aggregator with wrapping
    """
    wrap_limit = 18446744073709551615
    if b < a:
        # Counter wrapped: distance to the 64-bit ceiling plus new value.
        return (wrap_limit - a + b) / float(delta)
    return (b - a) / float(delta)
Counter derivative
def Counter(a, b, delta):
    """Counter derivative
    """
    if b < a:
        # Counter reset: no meaningful rate for this interval.
        return None
    return (b - a) / float(delta)
Method to calculate and format an average duration safely
def average_duration(total_duration, visits):
    """ Method to calculate and format an average duration safely """
    # Guard against division by zero when there are no visits.
    seconds = int(round(total_duration / Decimal(visits))) if visits else 0
    return str(timedelta(seconds=seconds))
Setup output processors
def setupOutputs(self, config):
    """Setup output processors

    Reads the 'outputs' list from *config* (falling back to a single
    Riemann TCP/UDP output built from self.server/self.port), imports
    each output class, registers it under its optional 'name', and
    schedules its client connection on the reactor.
    """
    if self.proto == 'tcp':
        defaultOutput = {
            'output': 'tensor.outputs.riemann.RiemannTCP',
            'server': self.server,
            'port': self.port
        }
    else:
        defaultOutput = {
            'output': 'tensor.outputs.riemann.RiemannUDP',
            'server': self.server,
            'port': self.port
        }

    outputs = config.get('outputs', [defaultOutput])

    for output in outputs:
        if not ('debug' in output):
            # Propagate the agent-wide debug flag to outputs that do not
            # configure their own.
            output['debug'] = self.debug

        cl = output['output'].split('.')[-1]                # class
        path = '.'.join(output['output'].split('.')[:-1])   # import path

        # Import the module and construct the output object
        outputObj = getattr(
            importlib.import_module(path), cl)(output, self)

        name = output.get('name', None)

        # Add the output to our routing hash
        if name in self.outputs:
            self.outputs[name].append(outputObj)
        else:
            self.outputs[name] = [outputObj]

        # connect the output
        reactor.callLater(0, outputObj.createClient)
Sets up source objects from the given config
def setupSources(self, config):
    """Sets up source objects from the given config"""
    for source_config in config.get('sources', []):
        source_obj = self.createSource(source_config)
        # Wire any configured triggers before tracking the source.
        self.setupTriggers(source_config, source_obj)
        self.sources.append(source_obj)
Callback that all event sources call when they have a new event or list of events
def sendEvent(self, source, events):
    """Callback that all event sources call when they have a new event
    or list of events
    """
    # Normalize to a list and keep the global event counter in sync.
    if isinstance(events, list):
        self.eventCounter += len(events)
    else:
        self.eventCounter += 1
        events = [events]

    queue = self._aggregateQueue(events)
    if queue:
        # Apply state thresholds only for sources with configured levels.
        if (source in self.critical) or (source in self.warn):
            self.setStates(source, queue)
        self.routeEvent(source, queue)

    self.lastEvents[source] = time.time()
Watchdog timer function.
def sourceWatchdog(self):
    """Watchdog timer function.

    Recreates sources which have not generated events in 10*interval
    if they have watchdog set to true in their configuration

    NOTE(review): self.sources is mutated (pop) while being enumerated,
    so the element following a restarted source is skipped until the
    next watchdog pass -- confirm this is acceptable.
    """
    for i, source in enumerate(self.sources):
        if not source.config.get('watchdog', False):
            # Watchdog disabled for this source.
            continue

        sn = repr(source)

        last = self.lastEvents.get(source, None)
        if last:
            try:
                # Stale: no events for more than 10 polling intervals.
                if last < (time.time()-(source.inter*10)):
                    log.msg("Trying to restart stale source %s: %ss" % (
                        sn, int(time.time() - last)
                    ))

                    s = self.sources.pop(i)

                    try:
                        # Stop the source's timer, if it has one.
                        s.t.stop()
                    except Exception as e:
                        log.msg("Could not stop timer for %s: %s" % (
                            sn, e))

                    config = copy.deepcopy(s.config)

                    # Drop every reference to the stale source so it can
                    # be collected before the replacement is created.
                    del self.lastEvents[source]
                    del s, source

                    source = self.createSource(config)
                    reactor.callLater(0, self._startSource, source)
            except Exception as e:
                log.msg("Could not reset source %s: %s" % (
                    sn, e))
Converts the input format to a regular expression as well as extracting fields
def _parse_format(self, format):
    """
    Converts the input format to a regular expression, as well as
    extracting fields

    Raises an exception if it couldn't compile the generated regex.

    :param format: Apache LogFormat string; each whitespace-separated
        token becomes one capture group and one entry in self._names /
        self._types.
    """
    format = format.strip()
    # Collapse runs of whitespace so split(' ') yields one token per field.
    format = re.sub('[ \t]+',' ',format)

    subpatterns = []

    # Helper patterns used to classify each format token.
    findquotes = re.compile(r'^\\"')
    findreferreragent = re.compile('Referer|User-Agent')
    findpercent = re.compile('^%.*t$')
    lstripquotes = re.compile(r'^\\"')
    rstripquotes = re.compile(r'\\"$')
    header = re.compile(r'.*%\{([^\}]+)\}i')

    for element in format.split(' '):
        hasquotes = 0
        if findquotes.search(element):
            hasquotes = 1

        if hasquotes:
            # Strip the surrounding escaped quotes before classification.
            element = lstripquotes.sub('', element)
            element = rstripquotes.sub('', element)

        head = header.match(element)
        if head:
            # %{Header}i token: field is named after the request header.
            self._names.append(head.groups()[0].lower())
            self._types.append(str)
        else:
            self._names.append(self.alias(element))
            self._types.append(self.types.get(element, [None, str])[1])

        subpattern = '(\S*)'

        if hasquotes:
            if element == '%r' or findreferreragent.search(element):
                # Request line / Referer / User-Agent may contain
                # backslash-escaped quotes.
                subpattern = r'\"([^"\\]*(?:\\.[^"\\]*)*)\"'
            else:
                subpattern = r'\"([^\"]*)\"'
        elif findpercent.search(element):
            # Timestamp tokens (%t, %{...}t) are bracket-delimited.
            subpattern = r'(\[[^\]]+\])'
        elif element == '%U':
            subpattern = '(.+?)'

        subpatterns.append(subpattern)

    self._pattern = '^' + ' '.join(subpatterns) + '$'

    try:
        self._regex = re.compile(self._pattern)
    except Exception as e:
        raise ApacheLogParserError(e)
Parses a single line from the log file and returns a dictionary of its contents.
def parse(self, line):
    """
    Parses a single line from the log file and returns a dictionary
    of its contents.

    Raises ApacheLogParserError if the line does not match the
    configured format.
    """
    stripped = line.strip()
    match = self._regex.match(stripped)
    if not match:
        raise ApacheLogParserError("Unable to parse: %s" % stripped)

    parsed = {}
    # Pair each capture group with its field name and type caster;
    # a bare '-' denotes a missing value.
    for name, caster, raw in zip(self._names, self._types, match.groups()):
        parsed[name] = None if raw == "-" else caster(raw)
    return parsed
Validate that date is in the future.
def validate_expires_at(form, field):
    """Validate that date is in the future."""
    # Expiry only matters when the request is being accepted.
    if not form.accept.data:
        return

    if not field.data or datetime.utcnow().date() >= field.data:
        raise validators.StopValidation(_(
            "Please provide a future date."
        ))

    if not field.data or \
            datetime.utcnow().date() + timedelta(days=365) < field.data:
        raise validators.StopValidation(_(
            "Please provide a date no more than 1 year into the future."
        ))
Validate that accept have not been set.
def validate_accept(form, field):
    """Validate that accept has not been set together with reject."""
    both_selected = field.data and form.reject.data
    if both_selected:
        raise validators.ValidationError(
            _("Both reject and accept cannot be set at the same time.")
        )
Validate that accept has not been set.
def validate_reject(form, field):
    """Validate that reject has not been set together with accept."""
    both_selected = field.data and form.accept.data
    if both_selected:
        raise validators.ValidationError(
            _("Both reject and accept cannot be set at the same time.")
        )
Validate message.
def validate_message(form, field):
    """Validate message."""
    # A rejection must carry a non-blank message for the requester.
    rejecting = form.reject.data
    if rejecting and not field.data.strip():
        raise validators.ValidationError(
            _("You are required to provide message to the requester when"
              " you reject a request.")
        )
Verify token and save in session if it's valid.
def verify_token():
    """Verify token and save in session if it's valid."""
    try:
        from .models import SecretLink

        candidate = request.args['token']
        if candidate and SecretLink.validate_token(candidate, {}):
            # Persist the valid token so later requests in this session
            # are authorized without re-supplying it.
            session['accessrequests-secret-token'] = candidate
    except KeyError:
        # No ?token= argument supplied -- nothing to do.
        pass
Flask application initialization.
def init_app(self, app):
    """Flask application initialization."""
    # Check for a secret-link token on every request.
    app.before_request(verify_token)
    self.init_config(app)
    # Register the extension state on the app.
    app.extensions['zenodo-accessrequests'] = _AppState(app=app)
Return a basic meaningful name based on device type
def name(self):
    """
    Return a basic meaningful name based on device type
    """
    device_type = self.device_type
    # Handheld devices are better identified by hardware than by browser.
    is_handheld = bool(
        device_type
        and device_type.code in (DeviceType.MOBILE, DeviceType.TABLET)
    )
    return self.device if is_handheld else self.browser