Column schema:

| column | type |
|---|---|
| partition | string, 3 distinct values |
| func_name | string, 1–134 chars |
| docstring | string, 1–46.9k chars |
| path | string, 4–223 chars |
| original_string | string, 75–104k chars |
| code | string, 75–104k chars |
| docstring_tokens | list, 1–1.97k items |
| repo | string, 7–55 chars |
| language | string, 1 distinct value |
| url | string, 87–315 chars |
| code_tokens | list, 19–28.4k items |
| sha | string, 40 chars |
**`freeze_from_checkpoint`** (xmartlabs/benderthon, `benderthon/tf_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L17-L30
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def freeze_from_checkpoint(input_checkpoint, output_file_path, output_node_names):
    """Freeze and shrink the graph based on a checkpoint and the output node names."""
    check_input_checkpoint(input_checkpoint)
    output_node_names = output_node_names_string_as_list(output_node_names)
    with tf.Session() as sess:
        restore_from_checkpoint(sess, input_checkpoint)
        freeze_graph.freeze_graph_with_def_protos(input_graph_def=sess.graph_def, input_saver_def=None,
                                                  input_checkpoint=input_checkpoint,
                                                  output_node_names=','.join(output_node_names),
                                                  restore_op_name='save/restore_all',
                                                  filename_tensor_name='save/Const:0', output_graph=output_file_path,
                                                  clear_devices=True, initializer_nodes='')
```
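A minimal usage sketch for this record's function, assuming TensorFlow 1.x; the checkpoint prefix `./model.ckpt` (whose `.meta` file must exist alongside it) and the node name `output` are placeholders:

```python
from benderthon import tf_freeze

# Freeze a hypothetical checkpoint into a single protobuf graph file.
tf_freeze.freeze_from_checkpoint(input_checkpoint='./model.ckpt',
                                 output_file_path='frozen_graph.pb',
                                 output_node_names='output')
```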
**`freeze`** (xmartlabs/benderthon, `benderthon/tf_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L33-L39
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def freeze(sess, output_file_path, output_node_names):
    """Freeze and shrink the graph based on a session and the output node names."""
    with TemporaryDirectory() as temp_dir_name:
        checkpoint_path = os.path.join(temp_dir_name, 'model.ckpt')
        tf.train.Saver().save(sess, checkpoint_path)
        freeze_from_checkpoint(checkpoint_path, output_file_path, output_node_names)
```
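`freeze` checkpoints the live session into a temporary directory and delegates to `freeze_from_checkpoint`. A sketch, assuming TensorFlow 1.x; the graph and node names are illustrative:

```python
import tensorflow as tf
from benderthon import tf_freeze

with tf.Session() as sess:
    x = tf.Variable(1.0, name='x')
    y = tf.identity(x, name='out')
    sess.run(tf.global_variables_initializer())
    # Produces 'frozen_graph.pb' with the variable baked in as a constant.
    tf_freeze.freeze(sess, 'frozen_graph.pb', 'out')
```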
**`save_graph_only`** (xmartlabs/benderthon, `benderthon/tf_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L42-L48
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def save_graph_only(sess, output_file_path, output_node_names, as_text=False):
    """Save a small version of the graph based on a session and the output node names."""
    for node in sess.graph_def.node:
        node.device = ''
    graph_def = graph_util.extract_sub_graph(sess.graph_def, output_node_names)
    output_dir, output_filename = os.path.split(output_file_path)
    graph_io.write_graph(graph_def, output_dir, output_filename, as_text=as_text)
```
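`save_graph_only` strips device placements and writes only the sub-graph feeding the given nodes, without weights. A sketch, assuming TensorFlow 1.x; note that here `output_node_names` goes straight to `graph_util.extract_sub_graph`, so it should already be a list:

```python
import tensorflow as tf
from benderthon import tf_freeze

with tf.Session() as sess:
    x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
    y = tf.identity(x, name='out')
    # Writes a weight-free GraphDef containing only nodes that feed 'out'.
    tf_freeze.save_graph_only(sess, 'graph.pbtxt', ['out'], as_text=True)
```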
**`save_graph_only_from_checkpoint`** (xmartlabs/benderthon, `benderthon/tf_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L51-L59
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def save_graph_only_from_checkpoint(input_checkpoint, output_file_path, output_node_names, as_text=False):
    """Save a small version of the graph based on a checkpoint and the output node names."""
    check_input_checkpoint(input_checkpoint)
    output_node_names = output_node_names_string_as_list(output_node_names)
    with tf.Session() as sess:
        restore_from_checkpoint(sess, input_checkpoint)
        save_graph_only(sess, output_file_path, output_node_names, as_text=as_text)
```
**`save_weights`** (xmartlabs/benderthon, `benderthon/tf_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L62-L82
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def save_weights(sess, output_path, conv_var_names=None, conv_transpose_var_names=None):
    """Save the weights of the trainable variables, each one in a different file in output_path."""
    if not conv_var_names:
        conv_var_names = []
    if not conv_transpose_var_names:
        conv_transpose_var_names = []
    for var in tf.trainable_variables():
        filename = '{}-{}'.format(output_path, var.name.replace(':', '-').replace('/', '-'))
        if var.name in conv_var_names:
            var = tf.transpose(var, perm=[3, 0, 1, 2])
        elif var.name in conv_transpose_var_names:
            var = tf.transpose(var, perm=[3, 1, 0, 2])
        value = sess.run(var)
        # noinspection PyTypeChecker
        with open(filename, 'w') as file_:
            value.tofile(file_)
```
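A usage sketch for `save_weights`, assuming TensorFlow 1.x; the variable name and output prefix are placeholders. Output file names are derived from variable names with `:` and `/` replaced by `-`:

```python
import tensorflow as tf
from benderthon import tf_freeze

with tf.Session() as sess:
    w = tf.Variable(tf.zeros([3, 3, 1, 8]), name='conv1/weights')
    sess.run(tf.global_variables_initializer())
    # Writes the raw values of 'conv1/weights:0' to 'weights-conv1-weights-0'.
    tf_freeze.save_weights(sess, 'weights')
```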
**`save_weights_from_checkpoint`** (xmartlabs/benderthon, `benderthon/tf_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L85-L92
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def save_weights_from_checkpoint(input_checkpoint, output_path, conv_var_names=None, conv_transpose_var_names=None):
    """Save the weights of the trainable variables given a checkpoint, each one in a different file in output_path."""
    check_input_checkpoint(input_checkpoint)
    with tf.Session() as sess:
        restore_from_checkpoint(sess, input_checkpoint)
        save_weights(sess, output_path, conv_var_names=conv_var_names,
                     conv_transpose_var_names=conv_transpose_var_names)
```
**`restore_from_checkpoint`** (xmartlabs/benderthon, `benderthon/util.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/util.py#L22-L26
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def restore_from_checkpoint(sess, input_checkpoint):
    """Return a TensorFlow saver from a checkpoint containing the metagraph."""
    saver = tf.train.import_meta_graph('{}.meta'.format(input_checkpoint))
    saver.restore(sess, input_checkpoint)
    return saver
```
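`restore_from_checkpoint` is the helper the records above build on. A small sketch, assuming TensorFlow 1.x and a hypothetical `./model.ckpt` checkpoint whose `./model.ckpt.meta` metagraph exists:

```python
import tensorflow as tf
from benderthon.util import restore_from_checkpoint

with tf.Session() as sess:
    # Imports the metagraph from './model.ckpt.meta' and restores the
    # variable values from the checkpoint into this session.
    saver = restore_from_checkpoint(sess, './model.ckpt')
    print(len(sess.graph_def.node))  # node count of the restored graph
```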
**`BaseNode.parse`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L128-L146
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def parse(cls, parser, token):
    """
    Parse the tag, instantiate the class.
    :type parser: django.template.base.Parser
    :type token: django.template.base.Token
    """
    tag_name, args, kwargs = parse_token_kwargs(
        parser, token,
        allowed_kwargs=cls.allowed_kwargs,
        compile_args=cls.compile_args,
        compile_kwargs=cls.compile_kwargs
    )
    cls.validate_args(tag_name, *args, **kwargs)
    if cls.end_tag_name:
        kwargs['nodelist'] = parser.parse((cls.end_tag_name,))
        parser.delete_first_token()
    return cls(tag_name, *args, **kwargs)
```
**`BaseNode.render`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L148-L158
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def render(self, context):
    """
    The default Django render() method for the tag.
    This method resolves the filter expressions, and calls :func:`render_tag`.
    """
    # Resolve token kwargs
    tag_args = [expr.resolve(context) for expr in self.args] if self.compile_args else self.args
    tag_kwargs = dict([(name, expr.resolve(context)) for name, expr in six.iteritems(self.kwargs)]) if self.compile_kwargs else self.kwargs
    return self.render_tag(context, *tag_args, **tag_kwargs)
```
**`BaseNode.render_tag`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L160-L164
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def render_tag(self, context, *tag_args, **tag_kwargs):
    """
    Render the tag, with all arguments resolved to their actual values.
    """
    raise NotImplementedError("{0}.render_tag() is not implemented!".format(self.__class__.__name__))
```
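Since `render_tag()` is the abstract hook, the intended use is to subclass `BaseNode` and override it. A minimal sketch, assuming a Django project; the tag name, its behavior, and the `max_args` value are illustrative, and registration goes through the `parse()` classmethod shown above:

```python
from django.template import Library
from tag_parser.basetags import BaseNode

register = Library()

class UpperNode(BaseNode):
    """Hypothetical tag: {% upper "text" %} renders its argument upper-cased."""
    max_args = 1

    def render_tag(self, context, *tag_args, **tag_kwargs):
        return tag_args[0].upper()

# BaseNode.parse is a classmethod, so it can serve directly as the
# compile function that Django's Library.tag() expects.
register.tag('upper', UpperNode.parse)
```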
**`BaseNode.validate_args`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L167-L186
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def validate_args(cls, tag_name, *args, **kwargs):
    """
    Validate the syntax of the template tag.
    """
    if cls.min_args is not None and len(args) < cls.min_args:
        if cls.min_args == 1:
            raise TemplateSyntaxError("'{0}' tag requires at least {1} argument".format(tag_name, cls.min_args))
        else:
            raise TemplateSyntaxError("'{0}' tag requires at least {1} arguments".format(tag_name, cls.min_args))
    if cls.max_args is not None and len(args) > cls.max_args:
        if cls.max_args == 0:
            if cls.allowed_kwargs:
                raise TemplateSyntaxError("'{0}' tag only allows keywords arguments, for example {1}=\"...\".".format(tag_name, cls.allowed_kwargs[0]))
            else:
                raise TemplateSyntaxError("'{0}' tag doesn't support any arguments".format(tag_name))
        elif cls.max_args == 1:
            raise TemplateSyntaxError("'{0}' tag only allows {1} argument.".format(tag_name, cls.max_args))
        else:
            raise TemplateSyntaxError("'{0}' tag only allows {1} arguments.".format(tag_name, cls.max_args))
```
**`BaseInclusionNode.get_context_data`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L266-L270
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
    """
    Return the context data for the included template.
    """
    raise NotImplementedError("{0}.get_context_data() is not implemented.".format(self.__class__.__name__))
```
**`BaseInclusionNode.get_context`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L272-L299
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def get_context(self, parent_context, data):
    """
    Wrap the context data in a :class:`~django.template.Context` object.
    :param parent_context: The context of the parent template.
    :type parent_context: :class:`~django.template.Context`
    :param data: The result from :func:`get_context_data`
    :type data: dict
    :return: Context data.
    :rtype: :class:`~django.template.Context`
    """
    if django.VERSION >= (1, 8):
        new_context = parent_context.new(data)
    else:
        settings = {
            'autoescape': parent_context.autoescape,
            'current_app': parent_context.current_app,
            'use_l10n': parent_context.use_l10n,
            'use_tz': parent_context.use_tz,
        }
        new_context = Context(data, **settings)
    # Pass CSRF token for same reasons as @register.inclusion_tag does.
    csrf_token = parent_context.get('csrf_token', None)
    if csrf_token is not None:
        new_context['csrf_token'] = csrf_token
    return new_context
```
**`BaseAssignmentNode.render_tag`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L323-L331
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def render_tag(self, context, *tag_args, **tag_kwargs):
    """
    Rendering of the tag. It either assigns the value as variable, or renders it.
    """
    if self.as_var:
        # Assign the value in the parent context
        context[self.as_var] = self.get_value(context, *tag_args, **tag_kwargs)
    return u''
```
**`BaseAssignmentOrInclusionNode.parse`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L364-L373
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def parse(cls, parser, token):
    """
    Parse the "as var" syntax.
    """
    bits, as_var = parse_as_var(parser, token)
    tag_name, args, kwargs = parse_token_kwargs(parser, bits, ('template',) + cls.allowed_kwargs, compile_args=cls.compile_args, compile_kwargs=cls.compile_kwargs)
    # Pass through standard chain
    cls.validate_args(tag_name, *args)
    return cls(tag_name, as_var, *args, **kwargs)
```
**`BaseAssignmentOrInclusionNode.render_tag`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L375-L386
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def render_tag(self, context, *tag_args, **tag_kwargs):
    """
    Rendering of the tag. It either assigns the value as variable, or renders it.
    """
    # Be very explicit about which base functionality is used:
    # Using super() for mixin support will not work nicely anyway here.
    if self.as_var:
        # Assign the value in the parent context
        return BaseAssignmentNode.render_tag(self, context, *tag_args, **tag_kwargs)
    else:
        # Render the output using the BaseInclusionNode features
        return BaseInclusionNode.render_tag(self, context, *tag_args, **tag_kwargs)
```
**`BaseAssignmentOrInclusionNode.get_context_data`** (edoburu/django-tag-parser, `tag_parser/basetags.py`, partition: test, language: python)

- **url:** https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L388-L401
- **sha:** `c24256cfdd0248434f2e3df3444ed9f945d4181f`

```python
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
    """
    Return the context data for the inclusion tag.
    Returns ``{'value': self.get_value(parent_context, *tag_args, **tag_kwargs)}`` by default.
    """
    if 'template' not in self.allowed_kwargs:
        # The overwritten get_value() doesn't have to take care of our customly inserted tag parameters,
        # It can safely assume passing **tag_kwargs to another function.
        tag_kwargs.pop('template', None)
    return {
        self.context_value_name: self.get_value(parent_context, *tag_args, **tag_kwargs)
    }
```
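From these records one can infer the intended subclassing pattern: implement `get_value()` and let the base class either assign the result (`as var`) or render it through an inclusion template. A sketch under the assumption that inclusion nodes declare their template via a `template_name` class attribute (that attribute is not shown in these records) and that the hypothetical `includes/value.html` template exists:

```python
from django.template import Library
from tag_parser.basetags import BaseAssignmentOrInclusionNode

register = Library()

class GetSiteNameNode(BaseAssignmentOrInclusionNode):
    """Hypothetical tag: {% get_site_name %} or {% get_site_name as name %}."""
    template_name = 'includes/value.html'  # assumed attribute name

    def get_value(self, context, *tag_args, **tag_kwargs):
        return 'My Site'

register.tag('get_site_name', GetSiteNameNode.parse)
```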
**`caffe_to_tensorflow_session`** (xmartlabs/benderthon, `benderthon/caffe_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/caffe_freeze.py#L21-L45
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name='Graph',
                                conversion_out_dir_path=None, use_padding_same=False):
    """Create a TensorFlow Session from a Caffe model."""
    try:
        # noinspection PyUnresolvedReferences
        from caffeflow import convert
    except ImportError:
        raise Exception("caffeflow package needs to be installed to freeze Caffe models. Check out the README file.")
    with (dummy_context_mgr(conversion_out_dir_path) or util.TemporaryDirectory()) as dir_path:
        params_values_output_path = os.path.join(dir_path, 'params_values.npy')
        network_output_path = os.path.join(dir_path, 'network.py')
        convert.convert(caffe_def_path, caffemodel_path, params_values_output_path, network_output_path, False,
                        use_padding_same=use_padding_same)
        network_module = imp.load_source('module.name', network_output_path)
        network_class = getattr(network_module, graph_name)
        network = network_class(inputs)
        sess = tf.Session()
        network.load(params_values_output_path, sess)
        return sess
```
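A usage sketch, assuming TensorFlow 1.x and an installed caffeflow dependency. The file paths, the input name `data`, and the tensor shape are placeholders, and the assumption that `inputs` is a dict mapping input names to tensors is inferred from `network_class(inputs)` in the record rather than from documented API:

```python
import tensorflow as tf
from benderthon import caffe_freeze

inputs = {'data': tf.placeholder(tf.float32, shape=[1, 224, 224, 3])}
sess = caffe_freeze.caffe_to_tensorflow_session('deploy.prototxt',
                                                'model.caffemodel',
                                                inputs)
```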
**`freeze`** (xmartlabs/benderthon, `benderthon/caffe_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/caffe_freeze.py#L48-L62
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def freeze(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names, graph_name='Graph',
           conversion_out_dir_path=None, checkpoint_out_path=None, use_padding_same=False):
    """Freeze and shrink the graph based on a Caffe model, the input tensors and the output node names."""
    with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,
                                     conversion_out_dir_path=conversion_out_dir_path,
                                     use_padding_same=use_padding_same) as sess:
        saver = tf.train.Saver()
        with (dummy_context_mgr(checkpoint_out_path) or util.TemporaryDirectory()) as temp_dir_path:
            checkpoint_path = checkpoint_out_path or os.path.join(temp_dir_path, 'pose.ckpt')
            saver.save(sess, checkpoint_path)
            output_node_names = util.output_node_names_string_as_list(output_node_names)
            tf_freeze.freeze_from_checkpoint(checkpoint_path, output_file_path, output_node_names)
```
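End to end, this is the one-call path from Caffe files to a frozen TensorFlow graph. A sketch with placeholder paths, shapes, and node names, under the same caffeflow and inputs-dict assumptions as above:

```python
import tensorflow as tf
from benderthon import caffe_freeze

inputs = {'data': tf.placeholder(tf.float32, shape=[1, 224, 224, 3])}
# 'prob' stands in for the network's real output node name.
caffe_freeze.freeze('deploy.prototxt', 'model.caffemodel', inputs,
                    'frozen_graph.pb', 'prob')
```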
**`save_graph_only`** (xmartlabs/benderthon, `benderthon/caffe_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/caffe_freeze.py#L65-L70
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def save_graph_only(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names, graph_name='Graph',
                    use_padding_same=False):
    """Save a small version of the graph based on a Caffe model, the input tensors and the output node names."""
    with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,
                                     use_padding_same=use_padding_same) as sess:
        tf_freeze.save_graph_only(sess, output_file_path, output_node_names)
```
**`save_weights`** (xmartlabs/benderthon, `benderthon/caffe_freeze.py`, partition: test, language: python)

- **url:** https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/caffe_freeze.py#L73-L79
- **sha:** `810b6fb90f56136257e7ed12e5a30d17ad7ce6ba`

```python
def save_weights(caffe_def_path, caffemodel_path, inputs, output_path, graph_name='Graph', conv_var_names=None,
                 conv_transpose_var_names=None, use_padding_same=False):
    """Save the weights of the trainable variables, each one in a different file in output_path."""
    with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,
                                     use_padding_same=use_padding_same) as sess:
        tf_freeze.save_weights(sess, output_path, conv_var_names=conv_var_names,
                               conv_transpose_var_names=conv_transpose_var_names)
```
**`make_rows`** (jaraco/jaraco.itertools, `jaraco/itertools.py`, partition: test, language: python)

- **url:** https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L29-L51
- **sha:** `0dc47c8924fa3d9ab676c3a6e195f03f728b72c6`

```python
def make_rows(num_columns, seq):
    """
    Make a sequence into rows of num_columns columns.
    >>> tuple(make_rows(2, [1, 2, 3, 4, 5]))
    ((1, 4), (2, 5), (3, None))
    >>> tuple(make_rows(3, [1, 2, 3, 4, 5]))
    ((1, 3, 5), (2, 4, None))
    """
    # calculate the minimum number of rows necessary to fit the list in
    # num_columns Columns
    num_rows, partial = divmod(len(seq), num_columns)
    if partial:
        num_rows += 1
    # break the seq into num_columns of length num_rows
    try:
        result = more_itertools.grouper(seq, num_rows)
    except TypeError:
        # more_itertools before 6.x
        result = more_itertools.grouper(num_rows, seq)
    # result is now a list of columns... transpose it to return a list
    # of rows
    return zip(*result)
```
**`bisect`** (jaraco/jaraco.itertools, `jaraco/itertools.py`, partition: test, language: python)

- **url:** https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L54-L76
- **sha:** `0dc47c8924fa3d9ab676c3a6e195f03f728b72c6`

```python
def bisect(seq, func=bool):
    """
    Split a sequence into two sequences: the first is elements that
    return False for func(element) and the second for True for
    func(element).
    By default, func is ``bool``, so uses the truth value of the object.
    >>> is_odd = lambda n: n%2
    >>> even, odd = bisect(range(5), is_odd)
    >>> list(odd)
    [1, 3]
    >>> list(even)
    [0, 2, 4]
    >>> other, zeros = bisect(reversed(range(5)))
    >>> list(zeros)
    [0]
    >>> list(other)
    [4, 3, 2, 1]
    """
    queues = GroupbySaved(seq, func)
    return queues.get_first_n_queues(2)
```
**`grouper_nofill_str`** (jaraco/jaraco.itertools, `jaraco/itertools.py`, partition: test, language: python)

- **url:** https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L379-L401
- **sha:** `0dc47c8924fa3d9ab676c3a6e195f03f728b72c6`

```python
def grouper_nofill_str(n, iterable):
    """
    Take a sequence and break it up into chunks of the specified size.
    The last chunk may be smaller than size.
    This works very similar to grouper_nofill, except
    it works with strings as well.
    >>> tuple(grouper_nofill_str(3, 'foobarbaz'))
    ('foo', 'bar', 'baz')
    You can still use it on non-strings too if you like.
    >>> tuple(grouper_nofill_str(42, []))
    ()
    >>> tuple(grouper_nofill_str(3, list(range(10))))
    ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9])
    """
    res = more_itertools.chunked(iterable, n)
    if isinstance(iterable, six.string_types):
        res = (''.join(item) for item in res)
    return res
```
**`flatten`** (jaraco/jaraco.itertools, `jaraco/itertools.py`, partition: test, language: python)

- **url:** https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L494-L502
- **sha:** `0dc47c8924fa3d9ab676c3a6e195f03f728b72c6`

```python
def flatten(subject, test=None):
    """
    *Deprecated*: Use more_itertools.collapse instead.
    """
    warnings.warn(
        "Use more_itertools.collapse instead",
        DeprecationWarning,
        stacklevel=2)
    return list(more_itertools.collapse(subject, base_type=(bytes,)))
```
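The deprecation message points at the drop-in replacement; a short sketch of the equivalent call:

```python
import more_itertools

nested = [1, [2, 3, [4]], 'ab']
# collapse() treats strings (and, per the base_type above, bytes) as
# atoms rather than descending into them, matching flatten()'s behavior.
flat = list(more_itertools.collapse(nested, base_type=(bytes,)))
# flat == [1, 2, 3, 4, 'ab']
```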
test
|
every_other
|
Yield every other item from the iterable
>>> ' '.join(every_other('abcdefg'))
'a c e g'
|
jaraco/itertools.py
|
def every_other(iterable):
"""
Yield every other item from the iterable
>>> ' '.join(every_other('abcdefg'))
'a c e g'
"""
items = iter(iterable)
while True:
try:
yield next(items)
next(items)
except StopIteration:
return
|
def every_other(iterable):
"""
Yield every other item from the iterable
>>> ' '.join(every_other('abcdefg'))
'a c e g'
"""
items = iter(iterable)
while True:
try:
yield next(items)
next(items)
except StopIteration:
return
|
[
"Yield",
"every",
"other",
"item",
"from",
"the",
"iterable"
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L573-L586
|
[
"def",
"every_other",
"(",
"iterable",
")",
":",
"items",
"=",
"iter",
"(",
"iterable",
")",
"while",
"True",
":",
"try",
":",
"yield",
"next",
"(",
"items",
")",
"next",
"(",
"items",
")",
"except",
"StopIteration",
":",
"return"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
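For reference, every_other is equivalent to a step-2 slice over the stream; this stdlib-only sketch (not part of the library) behaves identically:

import itertools

def every_other_islice(iterable):
    # step-2 slice over the whole stream: keep items 0, 2, 4, ...
    return itertools.islice(iterable, 0, None, 2)

print(' '.join(every_other_islice('abcdefg')))  # -> a c e g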
test
|
remove_duplicates
|
Given an iterable with items that may come in as sequential duplicates,
remove those duplicates.
Unlike unique_justseen, this function does not remove triplicates.
>>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc'))
'a b c a b c a a b b c c b c b c'
>>> ' '.join(remove_duplicates('aaaabbbbb'))
'a a b b b'
|
jaraco/itertools.py
|
def remove_duplicates(iterable, key=None):
"""
Given an iterable with items that may come in as sequential duplicates,
remove those duplicates.
Unlike unique_justseen, this function does not remove triplicates.
>>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc'))
'a b c a b c a a b b c c b c b c'
>>> ' '.join(remove_duplicates('aaaabbbbb'))
'a a b b b'
"""
return itertools.chain.from_iterable(six.moves.map(
every_other, six.moves.map(
operator.itemgetter(1),
itertools.groupby(iterable, key)
)))
|
def remove_duplicates(iterable, key=None):
"""
Given an iterable with items that may come in as sequential duplicates,
remove those duplicates.
Unlike unique_justseen, this function does not remove triplicates.
>>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc'))
'a b c a b c a a b b c c b c b c'
>>> ' '.join(remove_duplicates('aaaabbbbb'))
'a a b b b'
"""
return itertools.chain.from_iterable(six.moves.map(
every_other, six.moves.map(
operator.itemgetter(1),
itertools.groupby(iterable, key)
)))
|
[
"Given",
"an",
"iterable",
"with",
"items",
"that",
"may",
"come",
"in",
"as",
"sequential",
"duplicates",
"remove",
"those",
"duplicates",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L589-L605
|
[
"def",
"remove_duplicates",
"(",
"iterable",
",",
"key",
"=",
"None",
")",
":",
"return",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"six",
".",
"moves",
".",
"map",
"(",
"every_other",
",",
"six",
".",
"moves",
".",
"map",
"(",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"itertools",
".",
"groupby",
"(",
"iterable",
",",
"key",
")",
")",
")",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
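A brief usage sketch of remove_duplicates with a key function, assuming the jaraco.itertools package is installed; runs are grouped under the key before every other member of each run is kept:

from jaraco.itertools import remove_duplicates

# groupby(str.lower) collapses 'aA', 'bB', and 'cc' into runs, and
# every_other then keeps the first item of each adjacent pair.
print(''.join(remove_duplicates('aAbBcc', key=str.lower)))  # -> abc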
test
|
peek
|
Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3]
|
jaraco/itertools.py
|
def peek(iterable):
"""
Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3]
"""
peeker, original = itertools.tee(iterable)
return next(peeker), original
|
def peek(iterable):
"""
Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3]
"""
peeker, original = itertools.tee(iterable)
return next(peeker), original
|
[
"Get",
"the",
"next",
"value",
"from",
"an",
"iterable",
"but",
"also",
"return",
"an",
"iterable",
"that",
"will",
"subsequently",
"return",
"that",
"value",
"and",
"the",
"rest",
"of",
"the",
"original",
"iterable",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L618-L632
|
[
"def",
"peek",
"(",
"iterable",
")",
":",
"peeker",
",",
"original",
"=",
"itertools",
".",
"tee",
"(",
"iterable",
")",
"return",
"next",
"(",
"peeker",
")",
",",
"original"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
takewhile_peek
|
Like takewhile, but takes a peekable iterable and doesn't
consume the non-matching item.
>>> items = Peekable(range(10))
>>> is_small = lambda n: n < 4
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[0, 1, 2, 3]
>>> list(items)
[4, 5, 6, 7, 8, 9]
>>> empty = takewhile_peek(is_small, Peekable([]))
>>> list(empty)
[]
>>> items = Peekable([3])
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[3]
>>> list(items)
[]
>>> items = Peekable([4])
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[]
>>> list(items)
[4]
|
jaraco/itertools.py
|
def takewhile_peek(predicate, iterable):
"""
Like takewhile, but takes a peekable iterable and doesn't
consume the non-matching item.
>>> items = Peekable(range(10))
>>> is_small = lambda n: n < 4
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[0, 1, 2, 3]
>>> list(items)
[4, 5, 6, 7, 8, 9]
>>> empty = takewhile_peek(is_small, Peekable([]))
>>> list(empty)
[]
>>> items = Peekable([3])
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[3]
>>> list(items)
[]
>>> items = Peekable([4])
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[]
>>> list(items)
[4]
"""
while True:
try:
if not predicate(iterable.peek()):
break
yield next(iterable)
except StopIteration:
break
|
def takewhile_peek(predicate, iterable):
"""
Like takewhile, but takes a peekable iterable and doesn't
consume the non-matching item.
>>> items = Peekable(range(10))
>>> is_small = lambda n: n < 4
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[0, 1, 2, 3]
>>> list(items)
[4, 5, 6, 7, 8, 9]
>>> empty = takewhile_peek(is_small, Peekable([]))
>>> list(empty)
[]
>>> items = Peekable([3])
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[3]
>>> list(items)
[]
>>> items = Peekable([4])
>>> small_items = takewhile_peek(is_small, items)
>>> list(small_items)
[]
>>> list(items)
[4]
"""
while True:
try:
if not predicate(iterable.peek()):
break
yield next(iterable)
except StopIteration:
break
|
[
"Like",
"takewhile",
"but",
"takes",
"a",
"peekable",
"iterable",
"and",
"doesn",
"t",
"consume",
"the",
"non",
"-",
"matching",
"item",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L683-L723
|
[
"def",
"takewhile_peek",
"(",
"predicate",
",",
"iterable",
")",
":",
"while",
"True",
":",
"try",
":",
"if",
"not",
"predicate",
"(",
"iterable",
".",
"peek",
"(",
")",
")",
":",
"break",
"yield",
"next",
"(",
"iterable",
")",
"except",
"StopIteration",
":",
"break"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
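To see why the peekable variant of takewhile exists, compare the stdlib's itertools.takewhile, which consumes and discards the first non-matching item (stdlib-only sketch):

import itertools

items = iter(range(10))
small = list(itertools.takewhile(lambda n: n < 4, items))
print(small)        # [0, 1, 2, 3]
print(list(items))  # [5, 6, 7, 8, 9] -- the 4 was consumed and lost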
test
|
nwise
|
Like pairwise, except returns n-tuples of adjacent items.
s -> (s0,s1,...,sn), (s1,s2,...,s(n+1)), ...
|
jaraco/itertools.py
|
def nwise(iter, n):
"""
Like pairwise, except returns n-tuples of adjacent items.
s -> (s0,s1,...,sn), (s1,s2,...,s(n+1)), ...
"""
iterset = [iter]
while len(iterset) < n:
iterset[-1:] = itertools.tee(iterset[-1])
next(iterset[-1], None)
return six.moves.zip(*iterset)
|
def nwise(iter, n):
"""
Like pairwise, except returns n-tuples of adjacent items.
s -> (s0,s1,...,sn), (s1,s2,...,s(n+1)), ...
"""
iterset = [iter]
while len(iterset) < n:
iterset[-1:] = itertools.tee(iterset[-1])
next(iterset[-1], None)
return six.moves.zip(*iterset)
|
[
"Like",
"pairwise",
"except",
"returns",
"n",
"-",
"tuples",
"of",
"adjacent",
"items",
".",
"s",
"-",
">",
"(",
"s0",
"s1",
"...",
"sn",
")",
"(",
"s1",
"s2",
"...",
"s",
"(",
"n",
"+",
"1",
"))",
"..."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L803-L812
|
[
"def",
"nwise",
"(",
"iter",
",",
"n",
")",
":",
"iterset",
"=",
"[",
"iter",
"]",
"while",
"len",
"(",
"iterset",
")",
"<",
"n",
":",
"iterset",
"[",
"-",
"1",
":",
"]",
"=",
"itertools",
".",
"tee",
"(",
"iterset",
"[",
"-",
"1",
"]",
")",
"next",
"(",
"iterset",
"[",
"-",
"1",
"]",
",",
"None",
")",
"return",
"six",
".",
"moves",
".",
"zip",
"(",
"*",
"iterset",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
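Since nwise carries no doctest, a brief usage sketch (assuming the jaraco.itertools package is installed):

from jaraco.itertools import nwise

# Sliding windows of width 3; each tuple advances by one position.
print(list(nwise(range(5), 3)))  # -> [(0, 1, 2), (1, 2, 3), (2, 3, 4)]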
test
|
window
|
Given an iterable, return a new iterable which yields triples of
(pre, item, post), where pre and post are the items preceding and
following the item (or None if no such item is appropriate). pre
and post will always be pre_size and post_size in length.
>>> example = window(range(10), pre_size=2)
>>> pre, item, post = next(example)
>>> pre
(None, None)
>>> post
(1,)
>>> next(example)
((None, 0), 1, (2,))
>>> list(example)[-1]
((7, 8), 9, (None,))
|
jaraco/itertools.py
|
def window(iter, pre_size=1, post_size=1):
"""
Given an iterable, return a new iterable which yields triples of
    (pre, item, post), where pre and post are the items preceding and
following the item (or None if no such item is appropriate). pre
and post will always be pre_size and post_size in length.
>>> example = window(range(10), pre_size=2)
>>> pre, item, post = next(example)
>>> pre
(None, None)
>>> post
(1,)
>>> next(example)
((None, 0), 1, (2,))
>>> list(example)[-1]
((7, 8), 9, (None,))
"""
pre_iter, iter = itertools.tee(iter)
pre_iter = itertools.chain((None,) * pre_size, pre_iter)
pre_iter = nwise(pre_iter, pre_size)
post_iter, iter = itertools.tee(iter)
post_iter = itertools.chain(post_iter, (None,) * post_size)
post_iter = nwise(post_iter, post_size)
next(post_iter, None)
return six.moves.zip(pre_iter, iter, post_iter)
|
def window(iter, pre_size=1, post_size=1):
"""
Given an iterable, return a new iterable which yields triples of
    (pre, item, post), where pre and post are the items preceding and
following the item (or None if no such item is appropriate). pre
and post will always be pre_size and post_size in length.
>>> example = window(range(10), pre_size=2)
>>> pre, item, post = next(example)
>>> pre
(None, None)
>>> post
(1,)
>>> next(example)
((None, 0), 1, (2,))
>>> list(example)[-1]
((7, 8), 9, (None,))
"""
pre_iter, iter = itertools.tee(iter)
pre_iter = itertools.chain((None,) * pre_size, pre_iter)
pre_iter = nwise(pre_iter, pre_size)
post_iter, iter = itertools.tee(iter)
post_iter = itertools.chain(post_iter, (None,) * post_size)
post_iter = nwise(post_iter, post_size)
next(post_iter, None)
return six.moves.zip(pre_iter, iter, post_iter)
|
[
"Given",
"an",
"iterable",
"return",
"a",
"new",
"iterable",
"which",
"yields",
"triples",
"of",
"(",
"pre",
"item",
"post",
")",
"where",
"pre",
"and",
"post",
"are",
"the",
"items",
"preceeding",
"and",
"following",
"the",
"item",
"(",
"or",
"None",
"if",
"no",
"such",
"item",
"is",
"appropriate",
")",
".",
"pre",
"and",
"post",
"will",
"always",
"be",
"pre_size",
"and",
"post_size",
"in",
"length",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L815-L840
|
[
"def",
"window",
"(",
"iter",
",",
"pre_size",
"=",
"1",
",",
"post_size",
"=",
"1",
")",
":",
"pre_iter",
",",
"iter",
"=",
"itertools",
".",
"tee",
"(",
"iter",
")",
"pre_iter",
"=",
"itertools",
".",
"chain",
"(",
"(",
"None",
",",
")",
"*",
"pre_size",
",",
"pre_iter",
")",
"pre_iter",
"=",
"nwise",
"(",
"pre_iter",
",",
"pre_size",
")",
"post_iter",
",",
"iter",
"=",
"itertools",
".",
"tee",
"(",
"iter",
")",
"post_iter",
"=",
"itertools",
".",
"chain",
"(",
"post_iter",
",",
"(",
"None",
",",
")",
"*",
"post_size",
")",
"post_iter",
"=",
"nwise",
"(",
"post_iter",
",",
"post_size",
")",
"next",
"(",
"post_iter",
",",
"None",
")",
"return",
"six",
".",
"moves",
".",
"zip",
"(",
"pre_iter",
",",
"iter",
",",
"post_iter",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
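With the default pre_size and post_size of 1, each item is framed by one-element tuples of its neighbors (usage sketch, assuming jaraco.itertools is installed):

from jaraco.itertools import window

for pre, item, post in window('abc'):
    print(pre, item, post)
# (None,) a ('b',)
# ('a',) b ('c',)
# ('b',) c (None,)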
test
|
partition_items
|
Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2]
|
jaraco/itertools.py
|
def partition_items(count, bin_size):
"""
Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2]
"""
num_bins = int(math.ceil(count / float(bin_size)))
bins = [0] * num_bins
for i in range(count):
bins[i % num_bins] += 1
return bins
|
def partition_items(count, bin_size):
"""
Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2]
"""
num_bins = int(math.ceil(count / float(bin_size)))
bins = [0] * num_bins
for i in range(count):
bins[i % num_bins] += 1
return bins
|
[
"Given",
"the",
"total",
"number",
"of",
"items",
"determine",
"the",
"number",
"of",
"items",
"that",
"can",
"be",
"added",
"to",
"each",
"bin",
"with",
"a",
"limit",
"on",
"the",
"bin",
"size",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L856-L877
|
[
"def",
"partition_items",
"(",
"count",
",",
"bin_size",
")",
":",
"num_bins",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"count",
"/",
"float",
"(",
"bin_size",
")",
")",
")",
"bins",
"=",
"[",
"0",
"]",
"*",
"num_bins",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"bins",
"[",
"i",
"%",
"num_bins",
"]",
"+=",
"1",
"return",
"bins"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
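The round-robin distribution is what balances the bins: naive chunking of 8 items with a cap of 5 would give [5, 3], whereas partition_items spreads them evenly (usage sketch, assuming jaraco.itertools is installed):

from jaraco.itertools import partition_items

# ceil(8 / 5) == 2 bins; the 8 items are dealt round-robin over them.
print(partition_items(8, 5))  # -> [4, 4]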
test
|
balanced_rows
|
Like grouper, but balance the rows to minimize fill per row.
balanced_rows(3, 'ABCDEFG', 'x') --> ABC DEx FGx"
|
jaraco/itertools.py
|
def balanced_rows(n, iterable, fillvalue=None):
"""
Like grouper, but balance the rows to minimize fill per row.
balanced_rows(3, 'ABCDEFG', 'x') --> ABC DEx FGx"
"""
iterable, iterable_copy = itertools.tee(iterable)
count = len(tuple(iterable_copy))
for allocation in partition_items(count, n):
row = itertools.islice(iterable, allocation)
if allocation < n:
row = itertools.chain(row, [fillvalue])
yield tuple(row)
|
def balanced_rows(n, iterable, fillvalue=None):
"""
Like grouper, but balance the rows to minimize fill per row.
balanced_rows(3, 'ABCDEFG', 'x') --> ABC DEx FGx"
"""
iterable, iterable_copy = itertools.tee(iterable)
count = len(tuple(iterable_copy))
for allocation in partition_items(count, n):
row = itertools.islice(iterable, allocation)
if allocation < n:
row = itertools.chain(row, [fillvalue])
yield tuple(row)
|
[
"Like",
"grouper",
"but",
"balance",
"the",
"rows",
"to",
"minimize",
"fill",
"per",
"row",
".",
"balanced_rows",
"(",
"3",
"ABCDEFG",
"x",
")",
"--",
">",
"ABC",
"DEx",
"FGx"
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L880-L891
|
[
"def",
"balanced_rows",
"(",
"n",
",",
"iterable",
",",
"fillvalue",
"=",
"None",
")",
":",
"iterable",
",",
"iterable_copy",
"=",
"itertools",
".",
"tee",
"(",
"iterable",
")",
"count",
"=",
"len",
"(",
"tuple",
"(",
"iterable_copy",
")",
")",
"for",
"allocation",
"in",
"partition_items",
"(",
"count",
",",
"n",
")",
":",
"row",
"=",
"itertools",
".",
"islice",
"(",
"iterable",
",",
"allocation",
")",
"if",
"allocation",
"<",
"n",
":",
"row",
"=",
"itertools",
".",
"chain",
"(",
"row",
",",
"[",
"fillvalue",
"]",
")",
"yield",
"tuple",
"(",
"row",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
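A usage sketch matching the docstring's shorthand (assuming jaraco.itertools is installed); note the fill value lands at most once per short row:

from jaraco.itertools import balanced_rows

print(list(balanced_rows(3, 'ABCDEFG', fillvalue='x')))
# -> [('A', 'B', 'C'), ('D', 'E', 'x'), ('F', 'G', 'x')]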
test
|
always_iterable
|
Given an object, always return an iterable. If the item is not
already iterable, return a tuple containing only the item. If item is
None, an empty iterable is returned.
>>> always_iterable([1,2,3])
<list_iterator...>
>>> always_iterable('foo')
<tuple_iterator...>
>>> always_iterable(None)
<tuple_iterator...>
>>> always_iterable(range(10))
<range_iterator...>
>>> def _test_func(): yield "I'm iterable"
>>> print(next(always_iterable(_test_func())))
I'm iterable
Although mappings are iterable, treat each like a singleton, as
it's more like an object than a sequence.
>>> next(always_iterable(dict(a=1)))
{'a': 1}
|
jaraco/itertools.py
|
def always_iterable(item):
"""
Given an object, always return an iterable. If the item is not
already iterable, return a tuple containing only the item. If item is
None, an empty iterable is returned.
>>> always_iterable([1,2,3])
<list_iterator...>
>>> always_iterable('foo')
<tuple_iterator...>
>>> always_iterable(None)
<tuple_iterator...>
>>> always_iterable(range(10))
<range_iterator...>
>>> def _test_func(): yield "I'm iterable"
>>> print(next(always_iterable(_test_func())))
I'm iterable
Although mappings are iterable, treat each like a singleton, as
it's more like an object than a sequence.
>>> next(always_iterable(dict(a=1)))
{'a': 1}
"""
base_types = six.text_type, bytes, collections.abc.Mapping
return more_itertools.always_iterable(item, base_type=base_types)
|
def always_iterable(item):
"""
Given an object, always return an iterable. If the item is not
already iterable, return a tuple containing only the item. If item is
None, an empty iterable is returned.
>>> always_iterable([1,2,3])
<list_iterator...>
>>> always_iterable('foo')
<tuple_iterator...>
>>> always_iterable(None)
<tuple_iterator...>
>>> always_iterable(range(10))
<range_iterator...>
>>> def _test_func(): yield "I'm iterable"
>>> print(next(always_iterable(_test_func())))
I'm iterable
Although mappings are iterable, treat each like a singleton, as
it's more like an object than a sequence.
>>> next(always_iterable(dict(a=1)))
{'a': 1}
"""
base_types = six.text_type, bytes, collections.abc.Mapping
return more_itertools.always_iterable(item, base_type=base_types)
|
[
"Given",
"an",
"object",
"always",
"return",
"an",
"iterable",
".",
"If",
"the",
"item",
"is",
"not",
"already",
"iterable",
"return",
"a",
"tuple",
"containing",
"only",
"the",
"item",
".",
"If",
"item",
"is",
"None",
"an",
"empty",
"iterable",
"is",
"returned",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L903-L928
|
[
"def",
"always_iterable",
"(",
"item",
")",
":",
"base_types",
"=",
"six",
".",
"text_type",
",",
"bytes",
",",
"collections",
".",
"abc",
".",
"Mapping",
"return",
"more_itertools",
".",
"always_iterable",
"(",
"item",
",",
"base_type",
"=",
"base_types",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
suppress_exceptions
|
Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a'
|
jaraco/itertools.py
|
def suppress_exceptions(callables, *exceptions):
"""
Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a'
"""
if not exceptions:
exceptions = Exception,
for callable in callables:
try:
yield callable()
except exceptions:
pass
|
def suppress_exceptions(callables, *exceptions):
"""
Call each callable in callables, suppressing any exceptions supplied. If
no exception classes are supplied, all Exceptions will be suppressed.
>>> import functools
>>> c1 = functools.partial(int, 'a')
>>> c2 = functools.partial(int, '10')
>>> list(suppress_exceptions((c1, c2)))
[10]
>>> list(suppress_exceptions((c1, c2), KeyError))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'a'
"""
if not exceptions:
exceptions = Exception,
for callable in callables:
try:
yield callable()
except exceptions:
pass
|
[
"Call",
"each",
"callable",
"in",
"callables",
"suppressing",
"any",
"exceptions",
"supplied",
".",
"If",
"no",
"exception",
"classes",
"are",
"supplied",
"all",
"Exceptions",
"will",
"be",
"suppressed",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L931-L952
|
[
"def",
"suppress_exceptions",
"(",
"callables",
",",
"*",
"exceptions",
")",
":",
"if",
"not",
"exceptions",
":",
"exceptions",
"=",
"Exception",
",",
"for",
"callable",
"in",
"callables",
":",
"try",
":",
"yield",
"callable",
"(",
")",
"except",
"exceptions",
":",
"pass"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
duplicates
|
Yield duplicate items from any number of sorted iterables of items
>>> items_a = [1, 2, 3]
>>> items_b = [0, 3, 4, 5, 6]
>>> list(duplicates(items_a, items_b))
[(3, 3)]
It won't behave as you expect if the iterables aren't ordered
>>> items_b.append(1)
>>> list(duplicates(items_a, items_b))
[(3, 3)]
>>> list(duplicates(items_a, sorted(items_b)))
[(1, 1), (3, 3)]
This function is most interesting when it's operating on a key
of more complex objects.
>>> items_a = [dict(email='joe@example.com', id=1)]
>>> items_b = [dict(email='joe@example.com', id=2), dict(email='other')]
>>> dupe, = duplicates(items_a, items_b, key=operator.itemgetter('email'))
>>> dupe[0]['email'] == dupe[1]['email'] == 'joe@example.com'
True
>>> dupe[0]['id']
1
>>> dupe[1]['id']
2
|
jaraco/itertools.py
|
def duplicates(*iterables, **kwargs):
"""
Yield duplicate items from any number of sorted iterables of items
>>> items_a = [1, 2, 3]
>>> items_b = [0, 3, 4, 5, 6]
>>> list(duplicates(items_a, items_b))
[(3, 3)]
It won't behave as you expect if the iterables aren't ordered
>>> items_b.append(1)
>>> list(duplicates(items_a, items_b))
[(3, 3)]
>>> list(duplicates(items_a, sorted(items_b)))
[(1, 1), (3, 3)]
This function is most interesting when it's operating on a key
of more complex objects.
>>> items_a = [dict(email='joe@example.com', id=1)]
>>> items_b = [dict(email='joe@example.com', id=2), dict(email='other')]
>>> dupe, = duplicates(items_a, items_b, key=operator.itemgetter('email'))
>>> dupe[0]['email'] == dupe[1]['email'] == 'joe@example.com'
True
>>> dupe[0]['id']
1
>>> dupe[1]['id']
2
"""
key = kwargs.pop('key', lambda x: x)
assert not kwargs
zipped = more_itertools.collate(*iterables, key=key)
grouped = itertools.groupby(zipped, key=key)
groups = (
tuple(g)
for k, g in grouped
)
def has_dupes(group):
return len(group) > 1
return filter(has_dupes, groups)
|
def duplicates(*iterables, **kwargs):
"""
Yield duplicate items from any number of sorted iterables of items
>>> items_a = [1, 2, 3]
>>> items_b = [0, 3, 4, 5, 6]
>>> list(duplicates(items_a, items_b))
[(3, 3)]
It won't behave as you expect if the iterables aren't ordered
>>> items_b.append(1)
>>> list(duplicates(items_a, items_b))
[(3, 3)]
>>> list(duplicates(items_a, sorted(items_b)))
[(1, 1), (3, 3)]
This function is most interesting when it's operating on a key
of more complex objects.
>>> items_a = [dict(email='joe@example.com', id=1)]
>>> items_b = [dict(email='joe@example.com', id=2), dict(email='other')]
>>> dupe, = duplicates(items_a, items_b, key=operator.itemgetter('email'))
>>> dupe[0]['email'] == dupe[1]['email'] == 'joe@example.com'
True
>>> dupe[0]['id']
1
>>> dupe[1]['id']
2
"""
key = kwargs.pop('key', lambda x: x)
assert not kwargs
zipped = more_itertools.collate(*iterables, key=key)
grouped = itertools.groupby(zipped, key=key)
groups = (
tuple(g)
for k, g in grouped
)
def has_dupes(group):
return len(group) > 1
return filter(has_dupes, groups)
|
[
"Yield",
"duplicate",
"items",
"from",
"any",
"number",
"of",
"sorted",
"iterables",
"of",
"items"
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1012-L1053
|
[
"def",
"duplicates",
"(",
"*",
"iterables",
",",
"*",
"*",
"kwargs",
")",
":",
"key",
"=",
"kwargs",
".",
"pop",
"(",
"'key'",
",",
"lambda",
"x",
":",
"x",
")",
"assert",
"not",
"kwargs",
"zipped",
"=",
"more_itertools",
".",
"collate",
"(",
"*",
"iterables",
",",
"key",
"=",
"key",
")",
"grouped",
"=",
"itertools",
".",
"groupby",
"(",
"zipped",
",",
"key",
"=",
"key",
")",
"groups",
"=",
"(",
"tuple",
"(",
"g",
")",
"for",
"k",
",",
"g",
"in",
"grouped",
")",
"def",
"has_dupes",
"(",
"group",
")",
":",
"return",
"len",
"(",
"group",
")",
">",
"1",
"return",
"filter",
"(",
"has_dupes",
",",
"groups",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
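Newer releases of more_itertools have dropped collate in favor of the stdlib's heapq.merge; a stdlib-only sketch of the same idea (not the library's code):

import heapq
import itertools

def duplicates_sketch(*iterables, key=lambda x: x):
    # merge the sorted inputs, group by key, keep groups of size > 1
    merged = heapq.merge(*iterables, key=key)
    groups = (tuple(g) for _, g in itertools.groupby(merged, key=key))
    return (group for group in groups if len(group) > 1)

print(list(duplicates_sketch([1, 2, 3], [0, 3, 4, 5, 6])))  # -> [(3, 3)]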
test
|
assert_ordered
|
Assert that for all items in the iterable, they're in order based on comp
>>> list(assert_ordered(range(5)))
[0, 1, 2, 3, 4]
>>> list(assert_ordered(range(5), comp=operator.ge))
Traceback (most recent call last):
...
AssertionError: 0 < 1
>>> list(assert_ordered(range(5, 0, -1), key=operator.neg))
[5, 4, 3, 2, 1]
|
jaraco/itertools.py
|
def assert_ordered(iterable, key=lambda x: x, comp=operator.le):
"""
Assert that for all items in the iterable, they're in order based on comp
>>> list(assert_ordered(range(5)))
[0, 1, 2, 3, 4]
>>> list(assert_ordered(range(5), comp=operator.ge))
Traceback (most recent call last):
...
AssertionError: 0 < 1
>>> list(assert_ordered(range(5, 0, -1), key=operator.neg))
[5, 4, 3, 2, 1]
"""
err_tmpl = (
"{pair[0]} > {pair[1]}" if comp is operator.le else
"{pair[0]} < {pair[1]}" if comp is operator.ge else
"not {comp} {pair}"
)
for pair in more_itertools.pairwise(iterable):
keyed = tuple(map(key, pair))
assert comp(*keyed), err_tmpl.format(**locals())
yield pair[0]
yield pair[1]
|
def assert_ordered(iterable, key=lambda x: x, comp=operator.le):
"""
Assert that for all items in the iterable, they're in order based on comp
>>> list(assert_ordered(range(5)))
[0, 1, 2, 3, 4]
>>> list(assert_ordered(range(5), comp=operator.ge))
Traceback (most recent call last):
...
AssertionError: 0 < 1
>>> list(assert_ordered(range(5, 0, -1), key=operator.neg))
[5, 4, 3, 2, 1]
"""
err_tmpl = (
"{pair[0]} > {pair[1]}" if comp is operator.le else
"{pair[0]} < {pair[1]}" if comp is operator.ge else
"not {comp} {pair}"
)
for pair in more_itertools.pairwise(iterable):
keyed = tuple(map(key, pair))
assert comp(*keyed), err_tmpl.format(**locals())
yield pair[0]
yield pair[1]
|
[
"Assert",
"that",
"for",
"all",
"items",
"in",
"the",
"iterable",
"they",
"re",
"in",
"order",
"based",
"on",
"comp"
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1056-L1078
|
[
"def",
"assert_ordered",
"(",
"iterable",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
",",
"comp",
"=",
"operator",
".",
"le",
")",
":",
"err_tmpl",
"=",
"(",
"\"{pair[0]} > {pair[1]}\"",
"if",
"comp",
"is",
"operator",
".",
"le",
"else",
"\"{pair[0]} < {pair[1]}\"",
"if",
"comp",
"is",
"operator",
".",
"ge",
"else",
"\"not {comp} {pair}\"",
")",
"for",
"pair",
"in",
"more_itertools",
".",
"pairwise",
"(",
"iterable",
")",
":",
"keyed",
"=",
"tuple",
"(",
"map",
"(",
"key",
",",
"pair",
")",
")",
"assert",
"comp",
"(",
"*",
"keyed",
")",
",",
"err_tmpl",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"yield",
"pair",
"[",
"0",
"]",
"yield",
"pair",
"[",
"1",
"]"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
collate_revs
|
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
of the order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
|
jaraco/itertools.py
|
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
"""
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
    of the order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
"""
missing = object()
def maybe_merge(*items):
"""
Merge any non-null items
"""
def not_missing(ob):
return ob is not missing
return functools.reduce(merge, filter(not_missing, items))
new_items = collections.OrderedDict(
(key(el), el)
for el in new
)
old_items = collections.OrderedDict(
(key(el), el)
for el in old
)
# use the old_items as a reference
for old_key, old_item in _mutable_iter(old_items):
if old_key not in new_items:
yield old_item
continue
# yield all new items that appear before the matching key
before, match_new, new_items = _swap_on_miss(
partition_dict(new_items, old_key))
for new_key, new_item in before.items():
# ensure any new keys are merged with previous items if
# they exist
yield maybe_merge(new_item, old_items.pop(new_key, missing))
yield merge(old_item, match_new)
# finally, yield whatever is leftover
# yield from new_items.values()
for item in new_items.values():
yield item
|
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
"""
Given revision sets old and new, each containing a series
of revisions of some set of objects, collate them based on
these rules:
- all items from each set are yielded in stable order
- items in old are yielded first
- items in new are yielded last
- items that match are yielded in the order in which they
appear, giving preference to new
Items match based on the 'key' parameter (identity by default).
Items are merged using the 'merge' function, which accepts the old
and new items to be merged (returning new by default).
This algorithm requires fully materializing both old and new in memory.
>>> rev1 = ['a', 'b', 'c']
>>> rev2 = ['a', 'd', 'c']
>>> result = list(collate_revs(rev1, rev2))
'd' must appear before 'c'
>>> result.index('d') < result.index('c')
True
'b' must appear before 'd' because it came chronologically
first.
>>> result.index('b') < result.index('d')
True
>>> result
['a', 'b', 'd', 'c']
>>> list(collate_revs(['a', 'b', 'c'], ['d']))
['a', 'b', 'c', 'd']
>>> list(collate_revs(['b', 'a'], ['a', 'b']))
['a', 'b']
>>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
['a', 'b', 'c']
Given two sequences of things out of order, regardless
    of the order in which the items are merged, all
keys should always be merged.
>>> from more_itertools import consume
>>> left_items = ['a', 'b', 'c']
>>> right_items = ['a', 'c', 'b']
>>> consume(collate_revs(left_items, right_items, merge=print))
a a
c c
b b
>>> consume(collate_revs(right_items, left_items, merge=print))
a a
b b
c c
The merge should not suppress non-True items:
>>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
None None
<BLANKLINE>
0 0
"""
missing = object()
def maybe_merge(*items):
"""
Merge any non-null items
"""
def not_missing(ob):
return ob is not missing
return functools.reduce(merge, filter(not_missing, items))
new_items = collections.OrderedDict(
(key(el), el)
for el in new
)
old_items = collections.OrderedDict(
(key(el), el)
for el in old
)
# use the old_items as a reference
for old_key, old_item in _mutable_iter(old_items):
if old_key not in new_items:
yield old_item
continue
# yield all new items that appear before the matching key
before, match_new, new_items = _swap_on_miss(
partition_dict(new_items, old_key))
for new_key, new_item in before.items():
# ensure any new keys are merged with previous items if
# they exist
yield maybe_merge(new_item, old_items.pop(new_key, missing))
yield merge(old_item, match_new)
# finally, yield whatever is leftover
# yield from new_items.values()
for item in new_items.values():
yield item
|
[
"Given",
"revision",
"sets",
"old",
"and",
"new",
"each",
"containing",
"a",
"series",
"of",
"revisions",
"of",
"some",
"set",
"of",
"objects",
"collate",
"them",
"based",
"on",
"these",
"rules",
":"
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1081-L1187
|
[
"def",
"collate_revs",
"(",
"old",
",",
"new",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
",",
"merge",
"=",
"lambda",
"old",
",",
"new",
":",
"new",
")",
":",
"missing",
"=",
"object",
"(",
")",
"def",
"maybe_merge",
"(",
"*",
"items",
")",
":",
"\"\"\"\n\t\tMerge any non-null items\n\t\t\"\"\"",
"def",
"not_missing",
"(",
"ob",
")",
":",
"return",
"ob",
"is",
"not",
"missing",
"return",
"functools",
".",
"reduce",
"(",
"merge",
",",
"filter",
"(",
"not_missing",
",",
"items",
")",
")",
"new_items",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"key",
"(",
"el",
")",
",",
"el",
")",
"for",
"el",
"in",
"new",
")",
"old_items",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"key",
"(",
"el",
")",
",",
"el",
")",
"for",
"el",
"in",
"old",
")",
"# use the old_items as a reference",
"for",
"old_key",
",",
"old_item",
"in",
"_mutable_iter",
"(",
"old_items",
")",
":",
"if",
"old_key",
"not",
"in",
"new_items",
":",
"yield",
"old_item",
"continue",
"# yield all new items that appear before the matching key",
"before",
",",
"match_new",
",",
"new_items",
"=",
"_swap_on_miss",
"(",
"partition_dict",
"(",
"new_items",
",",
"old_key",
")",
")",
"for",
"new_key",
",",
"new_item",
"in",
"before",
".",
"items",
"(",
")",
":",
"# ensure any new keys are merged with previous items if",
"# they exist",
"yield",
"maybe_merge",
"(",
"new_item",
",",
"old_items",
".",
"pop",
"(",
"new_key",
",",
"missing",
")",
")",
"yield",
"merge",
"(",
"old_item",
",",
"match_new",
")",
"# finally, yield whatever is leftover",
"# yield from new_items.values()",
"for",
"item",
"in",
"new_items",
".",
"values",
"(",
")",
":",
"yield",
"item"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
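A brief usage sketch of collate_revs with a key and the default merge (assuming jaraco.itertools is installed); the matching record takes the new revision while order is preserved:

from jaraco.itertools import collate_revs

old = [dict(id=1, v='old'), dict(id=2, v='old')]
new = [dict(id=2, v='new')]
# id 1 exists only in old; id 2 matches, so the new revision wins.
print(list(collate_revs(old, new, key=lambda d: d['id'])))
# -> [{'id': 1, 'v': 'old'}, {'id': 2, 'v': 'new'}]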
test
|
_mutable_iter
|
Iterate over items in the dict, yielding the first one, but allowing
it to be mutated during the process.
>>> d = dict(a=1)
>>> it = _mutable_iter(d)
>>> next(it)
('a', 1)
>>> d
{}
>>> d.update(b=2)
>>> list(it)
[('b', 2)]
|
jaraco/itertools.py
|
def _mutable_iter(dict):
"""
Iterate over items in the dict, yielding the first one, but allowing
it to be mutated during the process.
>>> d = dict(a=1)
>>> it = _mutable_iter(d)
>>> next(it)
('a', 1)
>>> d
{}
>>> d.update(b=2)
>>> list(it)
[('b', 2)]
"""
while dict:
prev_key = next(iter(dict))
yield prev_key, dict.pop(prev_key)
|
def _mutable_iter(dict):
"""
Iterate over items in the dict, yielding the first one, but allowing
it to be mutated during the process.
>>> d = dict(a=1)
>>> it = _mutable_iter(d)
>>> next(it)
('a', 1)
>>> d
{}
>>> d.update(b=2)
>>> list(it)
[('b', 2)]
"""
while dict:
prev_key = next(iter(dict))
yield prev_key, dict.pop(prev_key)
|
[
"Iterate",
"over",
"items",
"in",
"the",
"dict",
"yielding",
"the",
"first",
"one",
"but",
"allowing",
"it",
"to",
"be",
"mutated",
"during",
"the",
"process",
".",
">>>",
"d",
"=",
"dict",
"(",
"a",
"=",
"1",
")",
">>>",
"it",
"=",
"_mutable_iter",
"(",
"d",
")",
">>>",
"next",
"(",
"it",
")",
"(",
"a",
"1",
")",
">>>",
"d",
"{}",
">>>",
"d",
".",
"update",
"(",
"b",
"=",
"2",
")",
">>>",
"list",
"(",
"it",
")",
"[",
"(",
"b",
"2",
")",
"]"
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1190-L1206
|
[
"def",
"_mutable_iter",
"(",
"dict",
")",
":",
"while",
"dict",
":",
"prev_key",
"=",
"next",
"(",
"iter",
"(",
"dict",
")",
")",
"yield",
"prev_key",
",",
"dict",
".",
"pop",
"(",
"prev_key",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
_swap_on_miss
|
Given a partition_dict result, if the partition missed, swap
the before and after.
|
jaraco/itertools.py
|
def _swap_on_miss(partition_result):
"""
Given a partition_dict result, if the partition missed, swap
the before and after.
"""
before, item, after = partition_result
return (before, item, after) if item else (after, item, before)
|
def _swap_on_miss(partition_result):
"""
Given a partition_dict result, if the partition missed, swap
the before and after.
"""
before, item, after = partition_result
return (before, item, after) if item else (after, item, before)
|
[
"Given",
"a",
"partition_dict",
"result",
"if",
"the",
"partition",
"missed",
"swap",
"the",
"before",
"and",
"after",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1209-L1215
|
[
"def",
"_swap_on_miss",
"(",
"partition_result",
")",
":",
"before",
",",
"item",
",",
"after",
"=",
"partition_result",
"return",
"(",
"before",
",",
"item",
",",
"after",
")",
"if",
"item",
"else",
"(",
"after",
",",
"item",
",",
"before",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
partition_dict
|
Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[]
|
jaraco/itertools.py
|
def partition_dict(items, key):
"""
Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[]
"""
def unmatched(pair):
test_key, item, = pair
return test_key != key
items_iter = iter(items.items())
item = items.get(key)
left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter))
right = collections.OrderedDict(items_iter)
return left, item, right
|
def partition_dict(items, key):
"""
Given an ordered dictionary of items and a key in that dict,
return an ordered dict of items before, the keyed item, and
an ordered dict of items after.
>>> od = collections.OrderedDict(zip(range(5), 'abcde'))
>>> before, item, after = partition_dict(od, 3)
>>> before
OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')])
>>> item
'd'
>>> after
OrderedDict([(4, 'e')])
Like string.partition, if the key is not found in the items,
the before will contain all items, item will be None, and
after will be an empty iterable.
>>> before, item, after = partition_dict(od, -1)
>>> before
OrderedDict([(0, 'a'), ..., (4, 'e')])
>>> item
>>> list(after)
[]
"""
def unmatched(pair):
test_key, item, = pair
return test_key != key
items_iter = iter(items.items())
item = items.get(key)
left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter))
right = collections.OrderedDict(items_iter)
return left, item, right
|
[
"Given",
"an",
"ordered",
"dictionary",
"of",
"items",
"and",
"a",
"key",
"in",
"that",
"dict",
"return",
"an",
"ordered",
"dict",
"of",
"items",
"before",
"the",
"keyed",
"item",
"and",
"an",
"ordered",
"dict",
"of",
"items",
"after",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1218-L1252
|
[
"def",
"partition_dict",
"(",
"items",
",",
"key",
")",
":",
"def",
"unmatched",
"(",
"pair",
")",
":",
"test_key",
",",
"item",
",",
"=",
"pair",
"return",
"test_key",
"!=",
"key",
"items_iter",
"=",
"iter",
"(",
"items",
".",
"items",
"(",
")",
")",
"item",
"=",
"items",
".",
"get",
"(",
"key",
")",
"left",
"=",
"collections",
".",
"OrderedDict",
"(",
"itertools",
".",
"takewhile",
"(",
"unmatched",
",",
"items_iter",
")",
")",
"right",
"=",
"collections",
".",
"OrderedDict",
"(",
"items_iter",
")",
"return",
"left",
",",
"item",
",",
"right"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
GroupbySaved.get_first_n_queues
|
Run through the sequence until n queues are created and return
them. If fewer are created, return those plus empty iterables to
compensate.
|
jaraco/itertools.py
|
def get_first_n_queues(self, n):
"""
Run through the sequence until n queues are created and return
them. If fewer are created, return those plus empty iterables to
compensate.
"""
try:
while len(self.queues) < n:
self.__fetch__()
except StopIteration:
pass
values = list(self.queues.values())
missing = n - len(values)
values.extend(iter([]) for n in range(missing))
return values
|
def get_first_n_queues(self, n):
"""
Run through the sequence until n queues are created and return
them. If fewer are created, return those plus empty iterables to
compensate.
"""
try:
while len(self.queues) < n:
self.__fetch__()
except StopIteration:
pass
values = list(self.queues.values())
missing = n - len(values)
values.extend(iter([]) for n in range(missing))
return values
|
[
"Run",
"through",
"the",
"sequence",
"until",
"n",
"queues",
"are",
"created",
"and",
"return",
"them",
".",
"If",
"fewer",
"are",
"created",
"return",
"those",
"plus",
"empty",
"iterables",
"to",
"compensate",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L140-L154
|
[
"def",
"get_first_n_queues",
"(",
"self",
",",
"n",
")",
":",
"try",
":",
"while",
"len",
"(",
"self",
".",
"queues",
")",
"<",
"n",
":",
"self",
".",
"__fetch__",
"(",
")",
"except",
"StopIteration",
":",
"pass",
"values",
"=",
"list",
"(",
"self",
".",
"queues",
".",
"values",
"(",
")",
")",
"missing",
"=",
"n",
"-",
"len",
"(",
"values",
")",
"values",
".",
"extend",
"(",
"iter",
"(",
"[",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"missing",
")",
")",
"return",
"values"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
Reusable.reset
|
Resets the iterator to the start.
Any remaining values in the current iteration are discarded.
|
jaraco/itertools.py
|
def reset(self):
"""
Resets the iterator to the start.
Any remaining values in the current iteration are discarded.
"""
self.__iterator, self.__saved = itertools.tee(self.__saved)
|
def reset(self):
"""
Resets the iterator to the start.
Any remaining values in the current iteration are discarded.
"""
self.__iterator, self.__saved = itertools.tee(self.__saved)
|
[
"Resets",
"the",
"iterator",
"to",
"the",
"start",
"."
] |
jaraco/jaraco.itertools
|
python
|
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L554-L560
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"__iterator",
",",
"self",
".",
"__saved",
"=",
"itertools",
".",
"tee",
"(",
"self",
".",
"__saved",
")"
] |
0dc47c8924fa3d9ab676c3a6e195f03f728b72c6
|
test
|
parse_as_var
|
Parse the remainder of the token to find an "as varname" statement.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
:type token: :class:`~django.template.Token` or split bits
|
tag_parser/parser.py
|
def parse_as_var(parser, token):
"""
    Parse the remainder of the token to find an "as varname" statement.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or split bits
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
as_var = None
if len(bits) > 2 and bits[-2] == 'as':
bits = bits[:]
as_var = bits.pop()
bits.pop() # as keyword
return bits, as_var
|
def parse_as_var(parser, token):
"""
    Parse the remainder of the token to find an "as varname" statement.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or split bits
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
as_var = None
if len(bits) > 2 and bits[-2] == 'as':
bits = bits[:]
as_var = bits.pop()
bits.pop() # as keyword
return bits, as_var
|
[
"Parse",
"the",
"remainder",
"of",
"the",
"token",
"to",
"find",
"a",
"as",
"varname",
"statement",
"."
] |
edoburu/django-tag-parser
|
python
|
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/parser.py#L13-L33
|
[
"def",
"parse_as_var",
"(",
"parser",
",",
"token",
")",
":",
"if",
"isinstance",
"(",
"token",
",",
"Token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"else",
":",
"bits",
"=",
"token",
"as_var",
"=",
"None",
"if",
"len",
"(",
"bits",
")",
">",
"2",
"and",
"bits",
"[",
"-",
"2",
"]",
"==",
"'as'",
":",
"bits",
"=",
"bits",
"[",
":",
"]",
"as_var",
"=",
"bits",
".",
"pop",
"(",
")",
"bits",
".",
"pop",
"(",
")",
"# as keyword",
"return",
"bits",
",",
"as_var"
] |
c24256cfdd0248434f2e3df3444ed9f945d4181f
|
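A hedged sketch of a typical call site for parse_as_var (assuming Django and django-tag-parser are installed; GetRecentNode and its placeholder data are illustrative, not part of the library):

from django.template import Library, Node
from tag_parser.parser import parse_as_var

register = Library()

class GetRecentNode(Node):
    def __init__(self, count, as_var):
        self.count, self.as_var = count, as_var

    def render(self, context):
        context[self.as_var] = list(range(int(self.count)))  # placeholder data
        return ''

@register.tag('get_recent')
def get_recent(parser, token):
    # "{% get_recent 5 as items %}" -> bits == ['get_recent', '5'],
    # as_var == 'items'
    bits, as_var = parse_as_var(parser, token)
    return GetRecentNode(bits[1], as_var)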
test
|
parse_token_kwargs
|
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
:type token: :class:`~django.template.Token` or split bits
:param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check.
:type allowed_kwargs: tuple
:return: The tag name, arguments and keyword arguments.
:rtype: tuple(tag_name, args, kwargs)
|
tag_parser/parser.py
|
def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True):
"""
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or split bits
:param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check.
:type allowed_kwargs: tuple
:return: The tag name, arguments and keyword arguments.
:rtype: tuple(tag_name, args, kwargs)
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
expect_kwarg = False
args = []
kwargs = {}
prev_bit = None
tag_name = bits[0]
for bit in bits[1::]:
kwarg_match = kwarg_re.match(bit)
if kwarg_match:
# Keyword argument
expect_kwarg = True
(name, expr) = bit.split('=', 2)
kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
else:
            # Still at positional arguments.
if expect_kwarg:
raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit))
args.append(parser.compile_filter(bit) if compile_args else bit)
prev_bit = bit
# Validate the allowed arguments, to make things easier for template developers
if allowed_kwargs is not None and kwargs:
if not allowed_kwargs:
raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed.")
for name in kwargs:
if name not in allowed_kwargs:
raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs)))
return tag_name, args, kwargs
|
def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True):
"""
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.
:param parser: The "parser" object that ``@register.tag`` provides.
:type parser: :class:`~django.template.Parser`
:param token: The "token" object that ``@register.tag`` provides.
    :type token: :class:`~django.template.Token` or split bits
:param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`.
:param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check.
:type allowed_kwargs: tuple
:return: The tag name, arguments and keyword arguments.
:rtype: tuple(tag_name, args, kwargs)
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
expect_kwarg = False
args = []
kwargs = {}
prev_bit = None
tag_name = bits[0]
for bit in bits[1::]:
kwarg_match = kwarg_re.match(bit)
if kwarg_match:
# Keyword argument
expect_kwarg = True
(name, expr) = bit.split('=', 2)
kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
else:
            # Still at positional arguments.
if expect_kwarg:
raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit))
args.append(parser.compile_filter(bit) if compile_args else bit)
prev_bit = bit
# Validate the allowed arguments, to make things easier for template developers
if allowed_kwargs is not None and kwargs:
if not allowed_kwargs:
raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed.")
for name in kwargs:
if name not in allowed_kwargs:
raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs)))
return tag_name, args, kwargs
|
[
"Allow",
"the",
"template",
"tag",
"arguments",
"to",
"be",
"like",
"a",
"normal",
"Python",
"function",
"with",
"*",
"args",
"and",
"**",
"kwargs",
"."
] |
edoburu/django-tag-parser
|
python
|
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/parser.py#L36-L87
|
[
"def",
"parse_token_kwargs",
"(",
"parser",
",",
"token",
",",
"allowed_kwargs",
"=",
"None",
",",
"compile_args",
"=",
"True",
",",
"compile_kwargs",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"token",
",",
"Token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"else",
":",
"bits",
"=",
"token",
"expect_kwarg",
"=",
"False",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"prev_bit",
"=",
"None",
"tag_name",
"=",
"bits",
"[",
"0",
"]",
"for",
"bit",
"in",
"bits",
"[",
"1",
":",
":",
"]",
":",
"kwarg_match",
"=",
"kwarg_re",
".",
"match",
"(",
"bit",
")",
"if",
"kwarg_match",
":",
"# Keyword argument",
"expect_kwarg",
"=",
"True",
"(",
"name",
",",
"expr",
")",
"=",
"bit",
".",
"split",
"(",
"'='",
",",
"2",
")",
"kwargs",
"[",
"name",
"]",
"=",
"parser",
".",
"compile_filter",
"(",
"expr",
")",
"if",
"compile_kwargs",
"else",
"expr",
"else",
":",
"# Still at positioned arguments.",
"if",
"expect_kwarg",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).\"",
".",
"format",
"(",
"bits",
"[",
"0",
"]",
",",
"bit",
",",
"prev_bit",
")",
")",
"args",
".",
"append",
"(",
"parser",
".",
"compile_filter",
"(",
"bit",
")",
"if",
"compile_args",
"else",
"bit",
")",
"prev_bit",
"=",
"bit",
"# Validate the allowed arguments, to make things easier for template developers",
"if",
"allowed_kwargs",
"is",
"not",
"None",
"and",
"kwargs",
":",
"if",
"not",
"allowed_kwargs",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"The option %s=... cannot be used in '%s'.\\nNo keyword arguments are allowed.\"",
")",
"for",
"name",
"in",
"kwargs",
":",
"if",
"name",
"not",
"in",
"allowed_kwargs",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"The option %s=... cannot be used in '%s'.\\nPossible options are: %s.\"",
"%",
"(",
"name",
",",
"bits",
"[",
"0",
"]",
",",
"\", \"",
".",
"join",
"(",
"allowed_kwargs",
")",
")",
")",
"return",
"tag_name",
",",
"args",
",",
"kwargs"
] |
c24256cfdd0248434f2e3df3444ed9f945d4181f
|
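A hedged sketch of a call site for parse_token_kwargs (assuming Django and django-tag-parser are installed; OptionsNode is illustrative). Compiled arguments come back as FilterExpression objects, so they are resolved against the context at render time:

from django.template import Library, Node
from tag_parser.parser import parse_token_kwargs

register = Library()

class OptionsNode(Node):
    def __init__(self, args, kwargs):
        self.args, self.kwargs = args, kwargs

    def render(self, context):
        # resolve the compiled FilterExpressions against the context
        values = [arg.resolve(context) for arg in self.args]
        options = {name: expr.resolve(context) for name, expr in self.kwargs.items()}
        return '{0} {1}'.format(values, options)

@register.tag('show_options')
def show_options(parser, token):
    # "{% show_options 'a' title='Hi' %}" -> tag_name == 'show_options',
    # one positional arg plus the 'title' keyword argument.
    tag_name, args, kwargs = parse_token_kwargs(
        parser, token, allowed_kwargs=('title',))
    return OptionsNode(args, kwargs)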
test
|
template_tag
|
Decorator to register class tags
:param library: The template tag library, typically instantiated as ``register = Library()``.
:type library: :class:`~django.template.Library`
:param name: The name of the template tag
:type name: str
Example:
.. code-block:: python
@template_tag(register, 'my_tag')
class MyTag(BaseNode):
pass
|
tag_parser/decorators.py
|
def template_tag(library, name):
"""
Decorator to register class tags
:param library: The template tag library, typically instantiated as ``register = Library()``.
:type library: :class:`~django.template.Library`
:param name: The name of the template tag
:type name: str
Example:
.. code-block:: python
@template_tag(register, 'my_tag')
class MyTag(BaseNode):
pass
"""
def _inner(cls):
if hasattr(cls, 'parse'):
compile_function = cls.parse
else:
# Hope that it's either a function, or cls with __init__(self, parser, token) method.
compile_function = cls
library.tag(name, compile_function)
return cls # Return the class body to keep it in the namespace of the module
return _inner
|
def template_tag(library, name):
"""
Decorator to register class tags
:param library: The template tag library, typically instantiated as ``register = Library()``.
:type library: :class:`~django.template.Library`
:param name: The name of the template tag
:type name: str
Example:
.. code-block:: python
@template_tag(register, 'my_tag')
class MyTag(BaseNode):
pass
"""
def _inner(cls):
if hasattr(cls, 'parse'):
compile_function = cls.parse
else:
# Hope that it's either a function, or cls with __init__(self, parser, token) method.
compile_function = cls
library.tag(name, compile_function)
return cls # Return the class body to keep it in the namespace of the module
return _inner
|
[
"Decorator",
"to",
"register",
"class",
"tags"
] |
edoburu/django-tag-parser
|
python
|
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/decorators.py#L6-L33
|
[
"def",
"template_tag",
"(",
"library",
",",
"name",
")",
":",
"def",
"_inner",
"(",
"cls",
")",
":",
"if",
"hasattr",
"(",
"cls",
",",
"'parse'",
")",
":",
"compile_function",
"=",
"cls",
".",
"parse",
"else",
":",
"# Hope that it's either a function, or cls with __init__(self, parser, token) method.",
"compile_function",
"=",
"cls",
"library",
".",
"tag",
"(",
"name",
",",
"compile_function",
")",
"return",
"cls",
"# Return the class body to keep it in the namespace of the module",
"return",
"_inner"
] |
c24256cfdd0248434f2e3df3444ed9f945d4181f
|
test
|
PublicKeychain.descendant
|
A descendant is a child many steps down.
|
keychain/public_keychain.py
|
def descendant(self, chain_path):
""" A descendant is a child many steps down.
"""
public_child = self.hdkeychain
chain_step_bytes = 4
max_bits_per_step = 2**31
chain_steps = [
int(chain_path[i:i+chain_step_bytes*2], 16) % max_bits_per_step
for i in range(0, len(chain_path), chain_step_bytes*2)
]
for step in chain_steps:
public_child = public_child.get_child(step)
return PublicKeychain(public_child)
|
def descendant(self, chain_path):
""" A descendant is a child many steps down.
"""
public_child = self.hdkeychain
chain_step_bytes = 4
max_bits_per_step = 2**31
chain_steps = [
int(chain_path[i:i+chain_step_bytes*2], 16) % max_bits_per_step
for i in range(0, len(chain_path), chain_step_bytes*2)
]
for step in chain_steps:
public_child = public_child.get_child(step)
return PublicKeychain(public_child)
|
[
"A",
"descendant",
"is",
"a",
"child",
"many",
"steps",
"down",
"."
] |
blockstack-packages/keychain-manager-py
|
python
|
https://github.com/blockstack-packages/keychain-manager-py/blob/c15c4ed8f3ed155f71ccac7c13ee08f081d38c06/keychain/public_keychain.py#L28-L41
|
[
"def",
"descendant",
"(",
"self",
",",
"chain_path",
")",
":",
"public_child",
"=",
"self",
".",
"hdkeychain",
"chain_step_bytes",
"=",
"4",
"max_bits_per_step",
"=",
"2",
"**",
"31",
"chain_steps",
"=",
"[",
"int",
"(",
"chain_path",
"[",
"i",
":",
"i",
"+",
"chain_step_bytes",
"*",
"2",
"]",
",",
"16",
")",
"%",
"max_bits_per_step",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"chain_path",
")",
",",
"chain_step_bytes",
"*",
"2",
")",
"]",
"for",
"step",
"in",
"chain_steps",
":",
"public_child",
"=",
"public_child",
".",
"get_child",
"(",
"step",
")",
"return",
"PublicKeychain",
"(",
"public_child",
")"
] |
c15c4ed8f3ed155f71ccac7c13ee08f081d38c06
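
A hedged sketch of descendant() based only on the record above: each 8 hex characters of the chain path form one 4-byte derivation step, reduced modulo 2**31. How the keychain itself is constructed is not shown in this record, so master_public is assumed to exist already.

# `master_public` is assumed to be an existing PublicKeychain
# (for example, exported from the matching private keychain).
chain_path = '9fa80eb8e32a6c6f'  # two steps: 0x9fa80eb8 and 0xe32a6c6f
child = master_public.descendant(chain_path)  # each step taken mod 2**31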
|
test
|
bip32_serialize
|
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
|
keychain/utils.py
|
def bip32_serialize(rawtuple):
"""
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
vbytes, depth, fingerprint, i, chaincode, key = rawtuple
i = encode(i, 256, 4)
chaincode = encode(hash_to_int(chaincode), 256, 32)
keydata = b'\x00' + key[:-1] if vbytes in PRIVATE else key
bindata = vbytes + from_int_to_byte(depth % 256) + fingerprint + i + chaincode + keydata
return changebase(bindata + bin_dbl_sha256(bindata)[:4], 256, 58)
|
def bip32_serialize(rawtuple):
"""
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
vbytes, depth, fingerprint, i, chaincode, key = rawtuple
i = encode(i, 256, 4)
chaincode = encode(hash_to_int(chaincode), 256, 32)
keydata = b'\x00' + key[:-1] if vbytes in PRIVATE else key
bindata = vbytes + from_int_to_byte(depth % 256) + fingerprint + i + chaincode + keydata
return changebase(bindata + bin_dbl_sha256(bindata)[:4], 256, 58)
|
[
"Derived",
"from",
"code",
"from",
"pybitcointools",
"(",
"https",
":",
"//",
"github",
".",
"com",
"/",
"vbuterin",
"/",
"pybitcointools",
")",
"by",
"Vitalik",
"Buterin"
] |
blockstack-packages/keychain-manager-py
|
python
|
https://github.com/blockstack-packages/keychain-manager-py/blob/c15c4ed8f3ed155f71ccac7c13ee08f081d38c06/keychain/utils.py#L28-L38
|
[
"def",
"bip32_serialize",
"(",
"rawtuple",
")",
":",
"vbytes",
",",
"depth",
",",
"fingerprint",
",",
"i",
",",
"chaincode",
",",
"key",
"=",
"rawtuple",
"i",
"=",
"encode",
"(",
"i",
",",
"256",
",",
"4",
")",
"chaincode",
"=",
"encode",
"(",
"hash_to_int",
"(",
"chaincode",
")",
",",
"256",
",",
"32",
")",
"keydata",
"=",
"b'\\x00'",
"+",
"key",
"[",
":",
"-",
"1",
"]",
"if",
"vbytes",
"in",
"PRIVATE",
"else",
"key",
"bindata",
"=",
"vbytes",
"+",
"from_int_to_byte",
"(",
"depth",
"%",
"256",
")",
"+",
"fingerprint",
"+",
"i",
"+",
"chaincode",
"+",
"keydata",
"return",
"changebase",
"(",
"bindata",
"+",
"bin_dbl_sha256",
"(",
"bindata",
")",
"[",
":",
"4",
"]",
",",
"256",
",",
"58",
")"
] |
c15c4ed8f3ed155f71ccac7c13ee08f081d38c06
|
test
|
bip32_deserialize
|
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
|
keychain/utils.py
|
def bip32_deserialize(data):
"""
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
dbin = changebase(data, 58, 256)
if bin_dbl_sha256(dbin[:-4])[:4] != dbin[-4:]:
raise Exception("Invalid checksum")
vbytes = dbin[0:4]
depth = from_byte_to_int(dbin[4])
fingerprint = dbin[5:9]
i = decode(dbin[9:13], 256)
chaincode = dbin[13:45]
key = dbin[46:78] + b'\x01' if vbytes in PRIVATE else dbin[45:78]
return (vbytes, depth, fingerprint, i, chaincode, key)
|
def bip32_deserialize(data):
"""
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
dbin = changebase(data, 58, 256)
if bin_dbl_sha256(dbin[:-4])[:4] != dbin[-4:]:
raise Exception("Invalid checksum")
vbytes = dbin[0:4]
depth = from_byte_to_int(dbin[4])
fingerprint = dbin[5:9]
i = decode(dbin[9:13], 256)
chaincode = dbin[13:45]
key = dbin[46:78] + b'\x01' if vbytes in PRIVATE else dbin[45:78]
return (vbytes, depth, fingerprint, i, chaincode, key)
|
[
"Derived",
"from",
"code",
"from",
"pybitcointools",
"(",
"https",
":",
"//",
"github",
".",
"com",
"/",
"vbuterin",
"/",
"pybitcointools",
")",
"by",
"Vitalik",
"Buterin"
] |
blockstack-packages/keychain-manager-py
|
python
|
https://github.com/blockstack-packages/keychain-manager-py/blob/c15c4ed8f3ed155f71ccac7c13ee08f081d38c06/keychain/utils.py#L41-L55
|
[
"def",
"bip32_deserialize",
"(",
"data",
")",
":",
"dbin",
"=",
"changebase",
"(",
"data",
",",
"58",
",",
"256",
")",
"if",
"bin_dbl_sha256",
"(",
"dbin",
"[",
":",
"-",
"4",
"]",
")",
"[",
":",
"4",
"]",
"!=",
"dbin",
"[",
"-",
"4",
":",
"]",
":",
"raise",
"Exception",
"(",
"\"Invalid checksum\"",
")",
"vbytes",
"=",
"dbin",
"[",
"0",
":",
"4",
"]",
"depth",
"=",
"from_byte_to_int",
"(",
"dbin",
"[",
"4",
"]",
")",
"fingerprint",
"=",
"dbin",
"[",
"5",
":",
"9",
"]",
"i",
"=",
"decode",
"(",
"dbin",
"[",
"9",
":",
"13",
"]",
",",
"256",
")",
"chaincode",
"=",
"dbin",
"[",
"13",
":",
"45",
"]",
"key",
"=",
"dbin",
"[",
"46",
":",
"78",
"]",
"+",
"b'\\x01'",
"if",
"vbytes",
"in",
"PRIVATE",
"else",
"dbin",
"[",
"45",
":",
"78",
"]",
"return",
"(",
"vbytes",
",",
"depth",
",",
"fingerprint",
",",
"i",
",",
"chaincode",
",",
"key",
")"
] |
c15c4ed8f3ed155f71ccac7c13ee08f081d38c06
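
Because bip32_serialize and bip32_deserialize are inverses over the same six-field tuple, a round-trip check is a natural sketch. The xpub below is the widely published BIP32 test vector 1 master public key, and the import path follows this record's keychain/utils.py location.

from keychain.utils import bip32_deserialize, bip32_serialize

xpub = ('xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8Nqtwy'
        'bGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8')
# (version bytes, depth, parent fingerprint, child index, chain code, key)
rawtuple = bip32_deserialize(xpub)
assert rawtuple[1] == 0  # a master key sits at depth zero
assert bip32_serialize(rawtuple) == xpub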
|
test
|
SQLiteSchemaExtractor.fetch_table_names
|
:return: List of table names in the database.
:rtype: list
|
sqliteschema/_extractor.py
|
def fetch_table_names(self, include_system_table=False):
"""
:return: List of table names in the database.
:rtype: list
"""
result = self.__cur.execute("SELECT name FROM sqlite_master WHERE TYPE='table'")
if result is None:
return []
table_names = [record[0] for record in result.fetchall()]
if include_system_table:
return table_names
return [table for table in table_names if table not in SQLITE_SYSTEM_TABLES]
|
def fetch_table_names(self, include_system_table=False):
"""
:return: List of table names in the database.
:rtype: list
"""
result = self.__cur.execute("SELECT name FROM sqlite_master WHERE TYPE='table'")
if result is None:
return []
table_names = [record[0] for record in result.fetchall()]
if include_system_table:
return table_names
return [table for table in table_names if table not in SQLITE_SYSTEM_TABLES]
|
[
":",
"return",
":",
"List",
"of",
"table",
"names",
"in",
"the",
"database",
".",
":",
"rtype",
":",
"list"
] |
thombashi/sqliteschema
|
python
|
https://github.com/thombashi/sqliteschema/blob/39fa769e31a0df1123066245073fb3a4593ae72d/sqliteschema/_extractor.py#L74-L89
|
[
"def",
"fetch_table_names",
"(",
"self",
",",
"include_system_table",
"=",
"False",
")",
":",
"result",
"=",
"self",
".",
"__cur",
".",
"execute",
"(",
"\"SELECT name FROM sqlite_master WHERE TYPE='table'\"",
")",
"if",
"result",
"is",
"None",
":",
"return",
"[",
"]",
"table_names",
"=",
"[",
"record",
"[",
"0",
"]",
"for",
"record",
"in",
"result",
".",
"fetchall",
"(",
")",
"]",
"if",
"include_system_table",
":",
"return",
"table_names",
"return",
"[",
"table",
"for",
"table",
"in",
"table_names",
"if",
"table",
"not",
"in",
"SQLITE_SYSTEM_TABLES",
"]"
] |
39fa769e31a0df1123066245073fb3a4593ae72d
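
A short usage sketch; the import mirrors the sample code in the fetch_sqlite_master record below, and 'sample.sqlite' is a hypothetical database file.

from sqliteschema import SQLiteSchemaExtractor

extractor = SQLiteSchemaExtractor('sample.sqlite')
print(extractor.fetch_table_names())  # user tables only
print(extractor.fetch_table_names(include_system_table=True))  # plus SQLite internals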
|
test
|
SQLiteSchemaExtractor.fetch_sqlite_master
|
Get sqlite_master table information as a list of dictionaries.
:return: sqlite_master table information.
:rtype: list
:Sample Code:
.. code:: python
from sqliteschema import SQLiteSchemaExtractor
print(json.dumps(SQLiteSchemaExtractor("sample.sqlite").fetch_sqlite_master(), indent=4))
:Output:
.. code-block:: json
[
{
"tbl_name": "sample_table",
"sql": "CREATE TABLE 'sample_table' ('a' INTEGER, 'b' REAL, 'c' TEXT, 'd' REAL, 'e' TEXT)",
"type": "table",
"name": "sample_table",
"rootpage": 2
},
{
"tbl_name": "sample_table",
"sql": "CREATE INDEX sample_table_a_index ON sample_table('a')",
"type": "index",
"name": "sample_table_a_index",
"rootpage": 3
}
]
|
sqliteschema/_extractor.py
|
def fetch_sqlite_master(self):
"""
Get sqlite_master table information as a list of dictionaries.
:return: sqlite_master table information.
:rtype: list
:Sample Code:
.. code:: python
from sqliteschema import SQLiteSchemaExtractor
print(json.dumps(SQLiteSchemaExtractor("sample.sqlite").fetch_sqlite_master(), indent=4))
:Output:
.. code-block:: json
[
{
"tbl_name": "sample_table",
"sql": "CREATE TABLE 'sample_table' ('a' INTEGER, 'b' REAL, 'c' TEXT, 'd' REAL, 'e' TEXT)",
"type": "table",
"name": "sample_table",
"rootpage": 2
},
{
"tbl_name": "sample_table",
"sql": "CREATE INDEX sample_table_a_index ON sample_table('a')",
"type": "index",
"name": "sample_table_a_index",
"rootpage": 3
}
]
"""
sqlite_master_record_list = []
result = self.__cur.execute(
"SELECT {:s} FROM sqlite_master".format(", ".join(self._SQLITE_MASTER_ATTR_NAME_LIST))
)
for record in result.fetchall():
sqlite_master_record_list.append(
dict(
[
[attr_name, item]
for attr_name, item in zip(self._SQLITE_MASTER_ATTR_NAME_LIST, record)
]
)
)
return sqlite_master_record_list
|
def fetch_sqlite_master(self):
"""
Get sqlite_master table information as a list of dictionaries.
:return: sqlite_master table information.
:rtype: list
:Sample Code:
.. code:: python
from sqliteschema import SQLiteSchemaExtractor
print(json.dumps(SQLiteSchemaExtractor("sample.sqlite").fetch_sqlite_master(), indent=4))
:Output:
.. code-block:: json
[
{
"tbl_name": "sample_table",
"sql": "CREATE TABLE 'sample_table' ('a' INTEGER, 'b' REAL, 'c' TEXT, 'd' REAL, 'e' TEXT)",
"type": "table",
"name": "sample_table",
"rootpage": 2
},
{
"tbl_name": "sample_table",
"sql": "CREATE INDEX sample_table_a_index ON sample_table('a')",
"type": "index",
"name": "sample_table_a_index",
"rootpage": 3
}
]
"""
sqlite_master_record_list = []
result = self.__cur.execute(
"SELECT {:s} FROM sqlite_master".format(", ".join(self._SQLITE_MASTER_ATTR_NAME_LIST))
)
for record in result.fetchall():
sqlite_master_record_list.append(
dict(
[
[attr_name, item]
for attr_name, item in zip(self._SQLITE_MASTER_ATTR_NAME_LIST, record)
]
)
)
return sqlite_master_record_list
|
[
"Get",
"sqlite_master",
"table",
"information",
"as",
"a",
"list",
"of",
"dictionaries",
"."
] |
thombashi/sqliteschema
|
python
|
https://github.com/thombashi/sqliteschema/blob/39fa769e31a0df1123066245073fb3a4593ae72d/sqliteschema/_extractor.py#L112-L162
|
[
"def",
"fetch_sqlite_master",
"(",
"self",
")",
":",
"sqlite_master_record_list",
"=",
"[",
"]",
"result",
"=",
"self",
".",
"__cur",
".",
"execute",
"(",
"\"SELECT {:s} FROM sqlite_master\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"self",
".",
"_SQLITE_MASTER_ATTR_NAME_LIST",
")",
")",
")",
"for",
"record",
"in",
"result",
".",
"fetchall",
"(",
")",
":",
"sqlite_master_record_list",
".",
"append",
"(",
"dict",
"(",
"[",
"[",
"attr_name",
",",
"item",
"]",
"for",
"attr_name",
",",
"item",
"in",
"zip",
"(",
"self",
".",
"_SQLITE_MASTER_ATTR_NAME_LIST",
",",
"record",
")",
"]",
")",
")",
"return",
"sqlite_master_record_list"
] |
39fa769e31a0df1123066245073fb3a4593ae72d
|
test
|
object_iter
|
Yields each node of the object graph in postorder.
|
jsonselect/jsonselect.py
|
def object_iter(obj, parent=None, parent_key=None, idx=None,
siblings=None):
"""Yields each node of object graph in postorder."""
obj_node = Node(value=obj, parent=parent, parent_key=parent_key,
siblings=siblings, idx=idx)
if isinstance(obj, list):
_siblings = len(obj)
for i, elem in enumerate(obj):
for node in object_iter(elem, obj_node, None, i + 1, _siblings):
yield node
elif isinstance(obj, collections.Mapping):
for key in obj:
for node in object_iter(obj[key], obj_node, key):
yield node
yield obj_node
|
def object_iter(obj, parent=None, parent_key=None, idx=None,
siblings=None):
"""Yields each node of object graph in postorder."""
obj_node = Node(value=obj, parent=parent, parent_key=parent_key,
siblings=siblings, idx=idx)
if isinstance(obj, list):
_siblings = len(obj)
for i, elem in enumerate(obj):
for node in object_iter(elem, obj_node, None, i + 1, _siblings):
yield node
elif isinstance(obj, collections.Mapping):
for key in obj:
for node in object_iter(obj[key], obj_node, key):
yield node
yield obj_node
|
[
"Yields",
"each",
"node",
"of",
"object",
"graph",
"in",
"postorder",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L98-L114
|
[
"def",
"object_iter",
"(",
"obj",
",",
"parent",
"=",
"None",
",",
"parent_key",
"=",
"None",
",",
"idx",
"=",
"None",
",",
"siblings",
"=",
"None",
")",
":",
"obj_node",
"=",
"Node",
"(",
"value",
"=",
"obj",
",",
"parent",
"=",
"parent",
",",
"parent_key",
"=",
"parent_key",
",",
"siblings",
"=",
"siblings",
",",
"idx",
"=",
"idx",
")",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"_siblings",
"=",
"len",
"(",
"obj",
")",
"for",
"i",
",",
"elem",
"in",
"enumerate",
"(",
"obj",
")",
":",
"for",
"node",
"in",
"object_iter",
"(",
"elem",
",",
"obj_node",
",",
"None",
",",
"i",
"+",
"1",
",",
"_siblings",
")",
":",
"yield",
"node",
"elif",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Mapping",
")",
":",
"for",
"key",
"in",
"obj",
":",
"for",
"node",
"in",
"object_iter",
"(",
"obj",
"[",
"key",
"]",
",",
"obj_node",
",",
"key",
")",
":",
"yield",
"node",
"yield",
"obj_node"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
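
A sketch of the traversal order, assuming the record's jsonselect/jsonselect.py import path. One caveat: `collections.Mapping` was removed in Python 3.10, so on modern interpreters the isinstance check in object_iter would need `collections.abc.Mapping`.

from jsonselect.jsonselect import object_iter

doc = {'a': [10, 20], 'b': 3}
# Children are yielded before their parents (postorder), so the root
# dict arrives last; list elements carry a 1-based idx and sibling count.
for node in object_iter(doc):
    print(node.parent_key, node.idx, node.value)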
|
test
|
select
|
Apply selector to obj and return matching nodes.
If only one node is found, return it; otherwise return a list of matches.
Returns False on syntax error, None if no results are found.
|
jsonselect/jsonselect.py
|
def select(selector, obj):
"""Appy selector to obj and return matching nodes.
If only one node is found, return it, otherwise return a list of matches.
Returns False on syntax error. None if no results found.
"""
parser = Parser(obj)
try:
return parser.parse(selector)
except SelectorSyntaxError as e:
log.exception(e)
return False
|
def select(selector, obj):
"""Appy selector to obj and return matching nodes.
If only one node is found, return it, otherwise return a list of matches.
Returns False on syntax error. None if no results found.
"""
parser = Parser(obj)
try:
return parser.parse(selector)
except SelectorSyntaxError as e:
log.exception(e)
return False
|
[
"Appy",
"selector",
"to",
"obj",
"and",
"return",
"matching",
"nodes",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L465-L477
|
[
"def",
"select",
"(",
"selector",
",",
"obj",
")",
":",
"parser",
"=",
"Parser",
"(",
"obj",
")",
"try",
":",
"return",
"parser",
".",
"parse",
"(",
"selector",
")",
"except",
"SelectorSyntaxError",
"as",
"e",
":",
"log",
".",
"exception",
"(",
"e",
")",
"return",
"False"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
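
A usage sketch grounded in the docstring above; the sample document is hypothetical.

from jsonselect.jsonselect import select

doc = {'name': 'pat', 'pets': [{'name': 'rex'}, {'name': 'mau'}]}
print(select('.name', doc))  # multiple matches come back as a list
print(select('!!!', doc))    # per the docstring, a syntax error is logged and yields False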
|
test
|
Parser.parse
|
Accept a list of tokens. Returns matched nodes of self.obj.
|
jsonselect/jsonselect.py
|
def parse(self, selector):
"""Accept a list of tokens. Returns matched nodes of self.obj."""
log.debug(self.obj)
tokens = lex(selector)
if self.peek(tokens, 'operator') == '*':
self.match(tokens, 'operator')
results = list(object_iter(self.obj))
else:
results = self.selector_production(tokens)
results = [node.value for node in results]
# single results should be returned as a primitive
if len(results) == 1:
return results[0]
elif not len(results):
return None
return results
|
def parse(self, selector):
"""Accept a list of tokens. Returns matched nodes of self.obj."""
log.debug(self.obj)
tokens = lex(selector)
if self.peek(tokens, 'operator') == '*':
self.match(tokens, 'operator')
results = list(object_iter(self.obj))
else:
results = self.selector_production(tokens)
results = [node.value for node in results]
# single results should be returned as a primitive
if len(results) == 1:
return results[0]
elif not len(results):
return None
return results
|
[
"Accept",
"a",
"list",
"of",
"tokens",
".",
"Returns",
"matched",
"nodes",
"of",
"self",
".",
"obj",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L153-L170
|
[
"def",
"parse",
"(",
"self",
",",
"selector",
")",
":",
"log",
".",
"debug",
"(",
"self",
".",
"obj",
")",
"tokens",
"=",
"lex",
"(",
"selector",
")",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'operator'",
")",
"==",
"'*'",
":",
"self",
".",
"match",
"(",
"tokens",
",",
"'operator'",
")",
"results",
"=",
"list",
"(",
"object_iter",
"(",
"self",
".",
"obj",
")",
")",
"else",
":",
"results",
"=",
"self",
".",
"selector_production",
"(",
"tokens",
")",
"results",
"=",
"[",
"node",
".",
"value",
"for",
"node",
"in",
"results",
"]",
"# single results should be returned as a primitive",
"if",
"len",
"(",
"results",
")",
"==",
"1",
":",
"return",
"results",
"[",
"0",
"]",
"elif",
"not",
"len",
"(",
"results",
")",
":",
"return",
"None",
"return",
"results"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
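
The single-match unwrapping is the subtle part of parse(), so a sketch makes it explicit; Parser is instantiated with the object to search, as this record shows.

from jsonselect.jsonselect import Parser

parser = Parser({'answer': 42})
print(parser.parse('.answer'))   # a lone match is unwrapped -> 42
print(parser.parse('.missing'))  # no matches -> None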
|
test
|
Parser.selector_production
|
Production for a full selector.
|
jsonselect/jsonselect.py
|
def selector_production(self, tokens):
"""Production for a full selector."""
validators = []
# the following productions should return predicate functions.
if self.peek(tokens, 'type'):
type_ = self.match(tokens, 'type')
validators.append(self.type_production(type_))
if self.peek(tokens, 'identifier'):
key = self.match(tokens, 'identifier')
validators.append(self.key_production(key))
if self.peek(tokens, 'pclass'):
pclass = self.match(tokens, 'pclass')
validators.append(self.pclass_production(pclass))
if self.peek(tokens, 'nth_func'):
nth_func = self.match(tokens, 'nth_func')
validators.append(self.nth_child_production(nth_func, tokens))
if self.peek(tokens, 'pclass_func'):
pclass_func = self.match(tokens, 'pclass_func')
validators.append(self.pclass_func_production(pclass_func, tokens))
if not len(validators):
raise SelectorSyntaxError('no selector recognized.')
# apply validators from a selector expression to self.obj
results = self._match_nodes(validators, self.obj)
if self.peek(tokens, 'operator'):
operator = self.match(tokens, 'operator')
rvals = self.selector_production(tokens)
if operator == ',':
results.extend(rvals)
elif operator == '>':
results = self.parents(results, rvals)
elif operator == '~':
results = self.siblings(results, rvals)
elif operator == ' ':
results = self.ancestors(results, rvals)
else:
raise SelectorSyntaxError("unrecognized operator '%s'"
% operator)
else:
if len(tokens):
rvals = self.selector_production(tokens)
results = self.ancestors(results, rvals)
return results
|
def selector_production(self, tokens):
"""Production for a full selector."""
validators = []
# the following productions should return predicate functions.
if self.peek(tokens, 'type'):
type_ = self.match(tokens, 'type')
validators.append(self.type_production(type_))
if self.peek(tokens, 'identifier'):
key = self.match(tokens, 'identifier')
validators.append(self.key_production(key))
if self.peek(tokens, 'pclass'):
pclass = self.match(tokens, 'pclass')
validators.append(self.pclass_production(pclass))
if self.peek(tokens, 'nth_func'):
nth_func = self.match(tokens, 'nth_func')
validators.append(self.nth_child_production(nth_func, tokens))
if self.peek(tokens, 'pclass_func'):
pclass_func = self.match(tokens, 'pclass_func')
validators.append(self.pclass_func_production(pclass_func, tokens))
if not len(validators):
raise SelectorSyntaxError('no selector recognized.')
# apply validators from a selector expression to self.obj
results = self._match_nodes(validators, self.obj)
if self.peek(tokens, 'operator'):
operator = self.match(tokens, 'operator')
rvals = self.selector_production(tokens)
if operator == ',':
results.extend(rvals)
elif operator == '>':
results = self.parents(results, rvals)
elif operator == '~':
results = self.siblings(results, rvals)
elif operator == ' ':
results = self.ancestors(results, rvals)
else:
raise SelectorSyntaxError("unrecognized operator '%s'"
% operator)
else:
if len(tokens):
rvals = self.selector_production(tokens)
results = self.ancestors(results, rvals)
return results
|
[
"Production",
"for",
"a",
"full",
"selector",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L172-L223
|
[
"def",
"selector_production",
"(",
"self",
",",
"tokens",
")",
":",
"validators",
"=",
"[",
"]",
"# the following productions should return predicate functions.",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'type'",
")",
":",
"type_",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'type'",
")",
"validators",
".",
"append",
"(",
"self",
".",
"type_production",
"(",
"type_",
")",
")",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'identifier'",
")",
":",
"key",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'identifier'",
")",
"validators",
".",
"append",
"(",
"self",
".",
"key_production",
"(",
"key",
")",
")",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'pclass'",
")",
":",
"pclass",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'pclass'",
")",
"validators",
".",
"append",
"(",
"self",
".",
"pclass_production",
"(",
"pclass",
")",
")",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'nth_func'",
")",
":",
"nth_func",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'nth_func'",
")",
"validators",
".",
"append",
"(",
"self",
".",
"nth_child_production",
"(",
"nth_func",
",",
"tokens",
")",
")",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'pclass_func'",
")",
":",
"pclass_func",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'pclass_func'",
")",
"validators",
".",
"append",
"(",
"self",
".",
"pclass_func_production",
"(",
"pclass_func",
",",
"tokens",
")",
")",
"if",
"not",
"len",
"(",
"validators",
")",
":",
"raise",
"SelectorSyntaxError",
"(",
"'no selector recognized.'",
")",
"# apply validators from a selector expression to self.obj",
"results",
"=",
"self",
".",
"_match_nodes",
"(",
"validators",
",",
"self",
".",
"obj",
")",
"if",
"self",
".",
"peek",
"(",
"tokens",
",",
"'operator'",
")",
":",
"operator",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'operator'",
")",
"rvals",
"=",
"self",
".",
"selector_production",
"(",
"tokens",
")",
"if",
"operator",
"==",
"','",
":",
"results",
".",
"extend",
"(",
"rvals",
")",
"elif",
"operator",
"==",
"'>'",
":",
"results",
"=",
"self",
".",
"parents",
"(",
"results",
",",
"rvals",
")",
"elif",
"operator",
"==",
"'~'",
":",
"results",
"=",
"self",
".",
"siblings",
"(",
"results",
",",
"rvals",
")",
"elif",
"operator",
"==",
"' '",
":",
"results",
"=",
"self",
".",
"ancestors",
"(",
"results",
",",
"rvals",
")",
"else",
":",
"raise",
"SelectorSyntaxError",
"(",
"\"unrecognized operator '%s'\"",
"%",
"operator",
")",
"else",
":",
"if",
"len",
"(",
"tokens",
")",
":",
"rvals",
"=",
"self",
".",
"selector_production",
"(",
"tokens",
")",
"results",
"=",
"self",
".",
"ancestors",
"(",
"results",
",",
"rvals",
")",
"return",
"results"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
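
The four combinators handled above (',', '>', '~', ' ') are easiest to see side by side; a sketch with a hypothetical document, contrasting descendant and direct-child matching:

from jsonselect.jsonselect import Parser

doc = {'outer': {'inner': {'leaf': 1}}}
p = Parser(doc)
print(p.parse('.outer .leaf'))    # ' ' walks ancestors -> 1
print(p.parse('.outer > .leaf'))  # '>' needs a direct parent -> None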
|
test
|
Parser.parents
|
Find nodes in rhs which have parents in lhs.
|
jsonselect/jsonselect.py
|
def parents(self, lhs, rhs):
"""Find nodes in rhs which have parents in lhs."""
return [node for node in rhs if node.parent in lhs]
|
def parents(self, lhs, rhs):
"""Find nodes in rhs which have parents in lhs."""
return [node for node in rhs if node.parent in lhs]
|
[
"Find",
"nodes",
"in",
"rhs",
"which",
"have",
"parents",
"in",
"lhs",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L225-L228
|
[
"def",
"parents",
"(",
"self",
",",
"lhs",
",",
"rhs",
")",
":",
"return",
"[",
"node",
"for",
"node",
"in",
"rhs",
"if",
"node",
".",
"parent",
"in",
"lhs",
"]"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
|
test
|
Parser.ancestors
|
Return nodes from rhs which have ancestors in lhs.
|
jsonselect/jsonselect.py
|
def ancestors(self, lhs, rhs):
"""Return nodes from rhs which have ancestors in lhs."""
def _search(node):
if node in lhs:
return True
if not node.parent:
return False
return _search(node.parent)
return [node for node in rhs if _search(node)]
|
def ancestors(self, lhs, rhs):
"""Return nodes from rhs which have ancestors in lhs."""
def _search(node):
if node in lhs:
return True
if not node.parent:
return False
return _search(node.parent)
return [node for node in rhs if _search(node)]
|
[
"Return",
"nodes",
"from",
"rhs",
"which",
"have",
"ancestors",
"in",
"lhs",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L230-L240
|
[
"def",
"ancestors",
"(",
"self",
",",
"lhs",
",",
"rhs",
")",
":",
"def",
"_search",
"(",
"node",
")",
":",
"if",
"node",
"in",
"lhs",
":",
"return",
"True",
"if",
"not",
"node",
".",
"parent",
":",
"return",
"False",
"return",
"_search",
"(",
"node",
".",
"parent",
")",
"return",
"[",
"node",
"for",
"node",
"in",
"rhs",
"if",
"_search",
"(",
"node",
")",
"]"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
|
test
|
Parser.siblings
|
Find nodes in rhs having common parents in lhs.
|
jsonselect/jsonselect.py
|
def siblings(self, lhs, rhs):
"""Find nodes in rhs having common parents in lhs."""
parents = [node.parent for node in lhs]
return [node for node in rhs if node.parent in parents]
|
def siblings(self, lhs, rhs):
"""Find nodes in rhs having common parents in lhs."""
parents = [node.parent for node in lhs]
return [node for node in rhs if node.parent in parents]
|
[
"Find",
"nodes",
"in",
"rhs",
"having",
"common",
"parents",
"in",
"lhs",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L242-L246
|
[
"def",
"siblings",
"(",
"self",
",",
"lhs",
",",
"rhs",
")",
":",
"parents",
"=",
"[",
"node",
".",
"parent",
"for",
"node",
"in",
"lhs",
"]",
"return",
"[",
"node",
"for",
"node",
"in",
"rhs",
"if",
"node",
".",
"parent",
"in",
"parents",
"]"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
|
test
|
Parser.nth_child_production
|
Parse args and pass them to pclass_func_validator.
|
jsonselect/jsonselect.py
|
def nth_child_production(self, lexeme, tokens):
"""Parse args and pass them to pclass_func_validator."""
args = self.match(tokens, 'expr')
pat = self.nth_child_pat.match(args)
if pat.group(5):
a = 2
b = 1 if pat.group(5) == 'odd' else 0
elif pat.group(6):
a = 0
b = int(pat.group(6))
else:
sign = pat.group(1) if pat.group(1) else '+'
coef = pat.group(2) if pat.group(2) else '1'
a = eval(sign + coef)
b = eval(pat.group(3) + pat.group(4)) if pat.group(3) else 0
reverse = False
if lexeme == 'nth-last-child':
reverse = True
def validate(node):
"""This crazy function taken from jsonselect.js:444."""
if not node.siblings:
return False
idx = node.idx - 1
tot = node.siblings
if reverse:
idx = tot - idx
else:
idx += 1
if a == 0:
m = b == idx
else:
mod = (idx - b) % a
m = not mod and (idx * a + b) >= 0
return m
return validate
|
def nth_child_production(self, lexeme, tokens):
"""Parse args and pass them to pclass_func_validator."""
args = self.match(tokens, 'expr')
pat = self.nth_child_pat.match(args)
if pat.group(5):
a = 2
b = 1 if pat.group(5) == 'odd' else 0
elif pat.group(6):
a = 0
b = int(pat.group(6))
else:
sign = pat.group(1) if pat.group(1) else '+'
coef = pat.group(2) if pat.group(2) else '1'
a = eval(sign + coef)
b = eval(pat.group(3) + pat.group(4)) if pat.group(3) else 0
reverse = False
if lexeme == 'nth-last-child':
reverse = True
def validate(node):
"""This crazy function taken from jsonselect.js:444."""
if not node.siblings:
return False
idx = node.idx - 1
tot = node.siblings
if reverse:
idx = tot - idx
else:
idx += 1
if a == 0:
m = b == idx
else:
mod = (idx - b) % a
m = not mod and (idx * a + b) >= 0
return m
return validate
|
[
"Parse",
"args",
"and",
"pass",
"them",
"to",
"pclass_func_validator",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L386-L430
|
[
"def",
"nth_child_production",
"(",
"self",
",",
"lexeme",
",",
"tokens",
")",
":",
"args",
"=",
"self",
".",
"match",
"(",
"tokens",
",",
"'expr'",
")",
"pat",
"=",
"self",
".",
"nth_child_pat",
".",
"match",
"(",
"args",
")",
"if",
"pat",
".",
"group",
"(",
"5",
")",
":",
"a",
"=",
"2",
"b",
"=",
"1",
"if",
"pat",
".",
"group",
"(",
"5",
")",
"==",
"'odd'",
"else",
"0",
"elif",
"pat",
".",
"group",
"(",
"6",
")",
":",
"a",
"=",
"0",
"b",
"=",
"int",
"(",
"pat",
".",
"group",
"(",
"6",
")",
")",
"else",
":",
"sign",
"=",
"pat",
".",
"group",
"(",
"1",
")",
"if",
"pat",
".",
"group",
"(",
"1",
")",
"else",
"'+'",
"coef",
"=",
"pat",
".",
"group",
"(",
"2",
")",
"if",
"pat",
".",
"group",
"(",
"2",
")",
"else",
"'1'",
"a",
"=",
"eval",
"(",
"sign",
"+",
"coef",
")",
"b",
"=",
"eval",
"(",
"pat",
".",
"group",
"(",
"3",
")",
"+",
"pat",
".",
"group",
"(",
"4",
")",
")",
"if",
"pat",
".",
"group",
"(",
"3",
")",
"else",
"0",
"reverse",
"=",
"False",
"if",
"lexeme",
"==",
"'nth-last-child'",
":",
"reverse",
"=",
"True",
"def",
"validate",
"(",
"node",
")",
":",
"\"\"\"This crazy function taken from jsonselect.js:444.\"\"\"",
"if",
"not",
"node",
".",
"siblings",
":",
"return",
"False",
"idx",
"=",
"node",
".",
"idx",
"-",
"1",
"tot",
"=",
"node",
".",
"siblings",
"if",
"reverse",
":",
"idx",
"=",
"tot",
"-",
"idx",
"else",
":",
"idx",
"+=",
"1",
"if",
"a",
"==",
"0",
":",
"m",
"=",
"b",
"==",
"idx",
"else",
":",
"mod",
"=",
"(",
"idx",
"-",
"b",
")",
"%",
"a",
"m",
"=",
"not",
"mod",
"and",
"(",
"idx",
"*",
"a",
"+",
"b",
")",
">=",
"0",
"return",
"m",
"return",
"validate"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
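
The a/b pair extracted above follows the CSS :nth-child(an+b) convention; a worked check of the validator's arithmetic for :nth-child(2n+1), i.e. a=2, b=1:

a, b = 2, 1  # :nth-child(2n+1), the same as :nth-child(odd)
matches = [idx for idx in range(1, 7)
           if (idx - b) % a == 0 and idx * a + b >= 0]
print(matches)  # -> [1, 3, 5], the odd 1-based positions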
|
test
|
Parser._match_nodes
|
Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators.
|
jsonselect/jsonselect.py
|
def _match_nodes(self, validators, obj):
"""Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators.
"""
results = []
for node in object_iter(obj):
if all([validate(node) for validate in validators]):
results.append(node)
return results
|
def _match_nodes(self, validators, obj):
"""Apply each validator in validators to each node in obj.
Return each node in obj which matches all validators.
"""
results = []
for node in object_iter(obj):
if all([validate(node) for validate in validators]):
results.append(node)
return results
|
[
"Apply",
"each",
"validator",
"in",
"validators",
"to",
"each",
"node",
"in",
"obj",
"."
] |
mwhooker/jsonselect
|
python
|
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L432-L442
|
[
"def",
"_match_nodes",
"(",
"self",
",",
"validators",
",",
"obj",
")",
":",
"results",
"=",
"[",
"]",
"for",
"node",
"in",
"object_iter",
"(",
"obj",
")",
":",
"if",
"all",
"(",
"[",
"validate",
"(",
"node",
")",
"for",
"validate",
"in",
"validators",
"]",
")",
":",
"results",
".",
"append",
"(",
"node",
")",
"return",
"results"
] |
c64aa9ea930de0344797ff87b04c753c8fc096a6
|
test
|
ping
|
Sends ICMP echo requests to destination `dst` `count` times.
Returns a deferred which fires when responses are finished.
|
tensor/protocol/icmp.py
|
def ping(dst, count, inter=0.2, maxwait=1000, size=64):
"""Sends ICMP echo requests to destination `dst` `count` times.
Returns a deferred which fires when responses are finished.
"""
def _then(result, p):
p.stopListening()
return result
d = defer.Deferred()
p = ICMPPort(0, ICMPPing(d, dst, count, inter, maxwait, size), "", 8192, reactor)
p.startListening()
return d.addCallback(_then, p)
|
def ping(dst, count, inter=0.2, maxwait=1000, size=64):
"""Sends ICMP echo requests to destination `dst` `count` times.
Returns a deferred which fires when responses are finished.
"""
def _then(result, p):
p.stopListening()
return result
d = defer.Deferred()
p = ICMPPort(0, ICMPPing(d, dst, count, inter, maxwait, size), "", 8192, reactor)
p.startListening()
return d.addCallback(_then, p)
|
[
"Sends",
"ICMP",
"echo",
"requests",
"to",
"destination",
"dst",
"count",
"times",
".",
"Returns",
"a",
"deferred",
"which",
"fires",
"when",
"responses",
"are",
"finished",
"."
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/protocol/icmp.py#L208-L220
|
[
"def",
"ping",
"(",
"dst",
",",
"count",
",",
"inter",
"=",
"0.2",
",",
"maxwait",
"=",
"1000",
",",
"size",
"=",
"64",
")",
":",
"def",
"_then",
"(",
"result",
",",
"p",
")",
":",
"p",
".",
"stopListening",
"(",
")",
"return",
"result",
"d",
"=",
"defer",
".",
"Deferred",
"(",
")",
"p",
"=",
"ICMPPort",
"(",
"0",
",",
"ICMPPing",
"(",
"d",
",",
"dst",
",",
"count",
",",
"inter",
",",
"maxwait",
",",
"size",
")",
",",
"\"\"",
",",
"8192",
",",
"reactor",
")",
"p",
".",
"startListening",
"(",
")",
"return",
"d",
".",
"addCallback",
"(",
"_then",
",",
"p",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
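
A minimal Twisted sketch around ping(); the import path follows this record, the target address is arbitrary, and the shape of the result is whatever ICMPPing fires its deferred with (not shown in this record). Raw ICMP sockets generally require root privileges.

from twisted.internet import reactor

from tensor.protocol.icmp import ping

def report(result):
    print(result)
    reactor.stop()

ping('8.8.8.8', 5).addCallback(report)
reactor.run()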
|
test
|
HTTPRequest.getBody
|
Make an HTTP request and return the body
|
tensor/utils.py
|
def getBody(self, url, method='GET', headers=None, data=None, socket=None):
"""Make an HTTP request and return the body
"""
if headers is None:
headers = {}  # a fresh dict per call; a mutable default would be shared
if 'User-Agent' not in headers:
headers['User-Agent'] = ['Tensor HTTP checker']
return self.request(url, method, headers, data, socket)
|
def getBody(self, url, method='GET', headers=None, data=None, socket=None):
"""Make an HTTP request and return the body
"""
if headers is None:
headers = {}  # a fresh dict per call; a mutable default would be shared
if 'User-Agent' not in headers:
headers['User-Agent'] = ['Tensor HTTP checker']
return self.request(url, method, headers, data, socket)
|
[
"Make",
"an",
"HTTP",
"request",
"and",
"return",
"the",
"body"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L254-L261
|
[
"def",
"getBody",
"(",
"self",
",",
"url",
",",
"method",
"=",
"'GET'",
",",
"headers",
"=",
"{",
"}",
",",
"data",
"=",
"None",
",",
"socket",
"=",
"None",
")",
":",
"if",
"not",
"'User-Agent'",
"in",
"headers",
":",
"headers",
"[",
"'User-Agent'",
"]",
"=",
"[",
"'Tensor HTTP checker'",
"]",
"return",
"self",
".",
"request",
"(",
"url",
",",
"method",
",",
"headers",
",",
"data",
",",
"socket",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
PersistentCache.expire
|
Expire any items in the cache older than `age` seconds
|
tensor/utils.py
|
def expire(self, age):
"""Expire any items in the cache older than `age` seconds"""
now = time.time()
cache = self._acquire_cache()
expired = [k for k, v in cache.items() if (now - v[0]) > age]
for k in expired:
if k in cache:
del cache[k]
if k in self.store:
del self.store[k]
self._write_cache(cache)
|
def expire(self, age):
"""Expire any items in the cache older than `age` seconds"""
now = time.time()
cache = self._acquire_cache()
expired = [k for k, v in cache.items() if (now - v[0]) > age]
for k in expired:
if k in cache:
del cache[k]
if k in self.store:
del self.store[k]
self._write_cache(cache)
|
[
"Expire",
"any",
"items",
"in",
"the",
"cache",
"older",
"than",
"age",
"seconds"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L329-L342
|
[
"def",
"expire",
"(",
"self",
",",
"age",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"cache",
"=",
"self",
".",
"_acquire_cache",
"(",
")",
"expired",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"cache",
".",
"items",
"(",
")",
"if",
"(",
"now",
"-",
"v",
"[",
"0",
"]",
")",
">",
"age",
"]",
"for",
"k",
"in",
"expired",
":",
"if",
"k",
"in",
"cache",
":",
"del",
"cache",
"[",
"k",
"]",
"if",
"k",
"in",
"self",
".",
"store",
":",
"del",
"self",
".",
"store",
"[",
"k",
"]",
"self",
".",
"_write_cache",
"(",
"cache",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
PersistentCache.set
|
Set a key `k` to value `v`
|
tensor/utils.py
|
def set(self, k, v):
"""Set a key `k` to value `v`"""
self.store[k] = (time.time(), v)
self._persist()
|
def set(self, k, v):
"""Set a key `k` to value `v`"""
self.store[k] = (time.time(), v)
self._persist()
|
[
"Set",
"a",
"key",
"k",
"to",
"value",
"v"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L344-L347
|
[
"def",
"set",
"(",
"self",
",",
"k",
",",
"v",
")",
":",
"self",
".",
"store",
"[",
"k",
"]",
"=",
"(",
"time",
".",
"time",
"(",
")",
",",
"v",
")",
"self",
".",
"_persist",
"(",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
PersistentCache.get
|
Returns the key's contents and modification time
|
tensor/utils.py
|
def get(self, k):
"""Returns key contents, and modify time"""
if self._changed():
self._read()
if k in self.store:
return tuple(self.store[k])
else:
return None
|
def get(self, k):
"""Returns key contents, and modify time"""
if self._changed():
self._read()
if k in self.store:
return tuple(self.store[k])
else:
return None
|
[
"Returns",
"key",
"contents",
"and",
"modify",
"time"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L349-L357
|
[
"def",
"get",
"(",
"self",
",",
"k",
")",
":",
"if",
"self",
".",
"_changed",
"(",
")",
":",
"self",
".",
"_read",
"(",
")",
"if",
"k",
"in",
"self",
".",
"store",
":",
"return",
"tuple",
"(",
"self",
".",
"store",
"[",
"k",
"]",
")",
"else",
":",
"return",
"None"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
PersistentCache.contains
|
Return True if key `k` exists
|
tensor/utils.py
|
def contains(self, k):
"""Return True if key `k` exists"""
if self._changed():
self._read()
return k in self.store.keys()
|
def contains(self, k):
"""Return True if key `k` exists"""
if self._changed():
self._read()
return k in self.store.keys()
|
[
"Return",
"True",
"if",
"key",
"k",
"exists"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L359-L363
|
[
"def",
"contains",
"(",
"self",
",",
"k",
")",
":",
"if",
"self",
".",
"_changed",
"(",
")",
":",
"self",
".",
"_read",
"(",
")",
"return",
"k",
"in",
"self",
".",
"store",
".",
"keys",
"(",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
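
A sketch tying the four PersistentCache records above together (set, get, contains, expire). The constructor argument, a backing file path, is an assumption not shown in these records.

from tensor.utils import PersistentCache

cache = PersistentCache('/tmp/tensor.cache')  # assumed constructor argument
cache.set('last_status', 'ok')
if cache.contains('last_status'):
    stored_at, value = cache.get('last_status')  # a (timestamp, value) tuple
    print(stored_at, value)
cache.expire(3600)  # drop entries older than one hour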
|
test
|
NistBeacon.chain_check
|
Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise.
|
nistbeacon/nistbeacon.py
|
def chain_check(cls, timestamp: int) -> bool:
"""
Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise.
"""
# Creation is messy.
# You want genius, you get madness; two sides of the same coin.
# ... I'm sure this can be cleaned up. However, let's test it first.
record = cls.get_record(timestamp)
if isinstance(record, NistBeaconValue) is False:
# Don't you dare try to play me
return False
prev_record = cls.get_previous(record.timestamp)
next_record = cls.get_next(record.timestamp)
if prev_record is None and next_record is None:
# Uh, how did you manage to do this?
# I'm not even mad, that's amazing.
return False
if (
isinstance(prev_record, NistBeaconValue) and
isinstance(next_record, NistBeaconValue)
):
# Majority case, somewhere in the middle of the chain
# True if:
# - All three records have proper signatures
# - The requested record's previous output equals previous
# - The next possible record's previous output equals the record
return (
record.valid_signature and
prev_record.valid_signature and
next_record.valid_signature and
record.previous_output_value == prev_record.output_value and
next_record.previous_output_value == record.output_value
)
if (
prev_record is None and
isinstance(next_record, NistBeaconValue)
):
# Edge case, this was potentially the first record of all time
return (
record.valid_signature and
next_record.valid_signature and
cls._INIT_RECORD == record and
next_record.previous_output_value == record.output_value
)
if (
isinstance(prev_record, NistBeaconValue) and
next_record is None
):
# Edge case, this was potentially the latest and greatest
return (
record.valid_signature and
prev_record.valid_signature and
record.previous_output_value == prev_record.output_value
)
|
def chain_check(cls, timestamp: int) -> bool:
"""
Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise.
"""
# Creation is messy.
# You want genius, you get madness; two sides of the same coin.
# ... I'm sure this can be cleaned up. However, let's test it first.
record = cls.get_record(timestamp)
if isinstance(record, NistBeaconValue) is False:
# Don't you dare try to play me
return False
prev_record = cls.get_previous(record.timestamp)
next_record = cls.get_next(record.timestamp)
if prev_record is None and next_record is None:
# Uh, how did you manage to do this?
# I'm not even mad, that's amazing.
return False
if (
isinstance(prev_record, NistBeaconValue) and
isinstance(next_record, NistBeaconValue)
):
# Majority case, somewhere in the middle of the chain
# True if:
# - All three records have proper signatures
# - The requested record's previous output equals previous
# - The next possible record's previous output equals the record
return (
record.valid_signature and
prev_record.valid_signature and
next_record.valid_signature and
record.previous_output_value == prev_record.output_value and
next_record.previous_output_value == record.output_value
)
if (
prev_record is None and
isinstance(next_record, NistBeaconValue)
):
# Edge case, this was potentially the first record of all time
return (
record.valid_signature and
next_record.valid_signature and
cls._INIT_RECORD == record and
next_record.previous_output_value == record.output_value
)
if (
isinstance(prev_record, NistBeaconValue) and
next_record is None
):
# Edge case, this was potentially the latest and greatest
return (
record.valid_signature and
prev_record.valid_signature and
record.previous_output_value == prev_record.output_value
)
|
[
"Given",
"a",
"record",
"timestamp",
"verify",
"the",
"chain",
"integrity",
"."
] |
urda/nistbeacon
|
python
|
https://github.com/urda/nistbeacon/blob/43e0c3d1e186e71387f072daf98911abb14469dd/nistbeacon/nistbeacon.py#L80-L144
|
[
"def",
"chain_check",
"(",
"cls",
",",
"timestamp",
":",
"int",
")",
"->",
"bool",
":",
"# Creation is messy.",
"# You want genius, you get madness; two sides of the same coin.",
"# ... I'm sure this can be cleaned up. However, let's test it first.",
"record",
"=",
"cls",
".",
"get_record",
"(",
"timestamp",
")",
"if",
"isinstance",
"(",
"record",
",",
"NistBeaconValue",
")",
"is",
"False",
":",
"# Don't you dare try to play me",
"return",
"False",
"prev_record",
"=",
"cls",
".",
"get_previous",
"(",
"record",
".",
"timestamp",
")",
"next_record",
"=",
"cls",
".",
"get_next",
"(",
"record",
".",
"timestamp",
")",
"if",
"prev_record",
"is",
"None",
"and",
"next_record",
"is",
"None",
":",
"# Uh, how did you manage to do this?",
"# I'm not even mad, that's amazing.",
"return",
"False",
"if",
"(",
"isinstance",
"(",
"prev_record",
",",
"NistBeaconValue",
")",
"and",
"isinstance",
"(",
"next_record",
",",
"NistBeaconValue",
")",
")",
":",
"# Majority case, somewhere in the middle of the chain",
"# True if:",
"# - All three records have proper signatures",
"# - The requested record's previous output equals previous",
"# - The next possible record's previous output equals the record",
"return",
"(",
"record",
".",
"valid_signature",
"and",
"prev_record",
".",
"valid_signature",
"and",
"next_record",
".",
"valid_signature",
"and",
"record",
".",
"previous_output_value",
"==",
"prev_record",
".",
"output_value",
"and",
"next_record",
".",
"previous_output_value",
"==",
"record",
".",
"output_value",
")",
"if",
"(",
"prev_record",
"is",
"None",
"and",
"isinstance",
"(",
"next_record",
",",
"NistBeaconValue",
")",
")",
":",
"# Edge case, this was potentially the first record of all time",
"return",
"(",
"record",
".",
"valid_signature",
"and",
"next_record",
".",
"valid_signature",
"and",
"cls",
".",
"_INIT_RECORD",
"==",
"record",
"and",
"next_record",
".",
"previous_output_value",
"==",
"record",
".",
"output_value",
")",
"if",
"(",
"isinstance",
"(",
"prev_record",
",",
"NistBeaconValue",
")",
"and",
"next_record",
"is",
"None",
")",
":",
"# Edge case, this was potentially the latest and greatest",
"return",
"(",
"record",
".",
"valid_signature",
"and",
"prev_record",
".",
"valid_signature",
"and",
"record",
".",
"previous_output_value",
"==",
"prev_record",
".",
"output_value",
")"
] |
43e0c3d1e186e71387f072daf98911abb14469dd
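
A small usage sketch; `from nistbeacon import NistBeacon` is assumed to be the public import, and the timestamp is illustrative rather than a known-good beacon record.

from nistbeacon import NistBeacon

if NistBeacon.chain_check(1447873020):  # illustrative POSIX timestamp
    print('record links cleanly to its neighbours')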
|
test
|
NistBeacon.get_first_record
|
Get the first (oldest) record available. Since the first record
IS a known value in the system, we can load it from constants.
:param download: 'True' will always reach out to NIST to get the
first record. 'False' returns a local copy.
:return: The first beacon value. 'None' otherwise.
|
nistbeacon/nistbeacon.py
|
def get_first_record(
cls,
download: bool=True
) -> NistBeaconValue:
"""
Get the first (oldest) record available. Since the first record
IS a known value in the system, we can load it from constants.
:param download: 'True' will always reach out to NIST to get the
first record. 'False' returns a local copy.
:return: The first beacon value. 'None' otherwise.
"""
if download:
return NistBeacon.get_record(cls._INIT_RECORD.timestamp)
else:
return NistBeaconValue.from_json(cls._INIT_RECORD.json)
|
def get_first_record(
cls,
download: bool=True
) -> NistBeaconValue:
"""
Get the first (oldest) record available. Since the first record
IS a known value in the system, we can load it from constants.
:param download: 'True' will always reach out to NIST to get the
first record. 'False' returns a local copy.
:return: The first beacon value. 'None' otherwise.
"""
if download:
return NistBeacon.get_record(cls._INIT_RECORD.timestamp)
else:
return NistBeaconValue.from_json(cls._INIT_RECORD.json)
|
[
"Get",
"the",
"first",
"(",
"oldest",
")",
"record",
"available",
".",
"Since",
"the",
"first",
"record",
"IS",
"a",
"known",
"value",
"in",
"the",
"system",
"we",
"can",
"load",
"it",
"from",
"constants",
"."
] |
urda/nistbeacon
|
python
|
https://github.com/urda/nistbeacon/blob/43e0c3d1e186e71387f072daf98911abb14469dd/nistbeacon/nistbeacon.py#L147-L163
|
[
"def",
"get_first_record",
"(",
"cls",
",",
"download",
":",
"bool",
"=",
"True",
")",
"->",
"NistBeaconValue",
":",
"if",
"download",
":",
"return",
"NistBeacon",
".",
"get_record",
"(",
"cls",
".",
"_INIT_RECORD",
".",
"timestamp",
")",
"else",
":",
"return",
"NistBeaconValue",
".",
"from_json",
"(",
"cls",
".",
"_INIT_RECORD",
".",
"json",
")"
] |
43e0c3d1e186e71387f072daf98911abb14469dd
|
test
|
NistBeaconValue.from_json
|
Convert a string of JSON which represents a NIST randomness beacon
value into a 'NistBeaconValue' object.
:param input_json: JSON to build a 'Nist RandomnessBeaconValue' from
:return: A 'NistBeaconValue' object, 'None' otherwise
|
nistbeacon/nistbeaconvalue.py
|
def from_json(cls, input_json: str) -> 'NistBeaconValue':
"""
Convert a string of JSON which represents a NIST randomness beacon
value into a 'NistBeaconValue' object.
:param input_json: JSON to build a 'Nist RandomnessBeaconValue' from
:return: A 'NistBeaconValue' object, 'None' otherwise
"""
try:
data_dict = json.loads(input_json)
except ValueError:
return None
# Our required values are "must haves". This makes it simple
# to verify we loaded everything out of JSON correctly.
required_values = {
cls._KEY_FREQUENCY: None,
cls._KEY_OUTPUT_VALUE: None,
cls._KEY_PREVIOUS_OUTPUT_VALUE: None,
cls._KEY_SEED_VALUE: None,
cls._KEY_SIGNATURE_VALUE: None,
cls._KEY_STATUS_CODE: None,
cls._KEY_TIMESTAMP: None,
cls._KEY_VERSION: None,
}
for key in required_values:
if key in data_dict:
required_values[key] = data_dict[key]
# Confirm that the required values are set, and not 'None'
if None in required_values.values():
return None
# We have all the required values, return a node object
return cls(
version=required_values[cls._KEY_VERSION],
frequency=int(required_values[cls._KEY_FREQUENCY]),
timestamp=int(required_values[cls._KEY_TIMESTAMP]),
seed_value=required_values[cls._KEY_SEED_VALUE],
previous_output_value=required_values[
cls._KEY_PREVIOUS_OUTPUT_VALUE
],
signature_value=required_values[cls._KEY_SIGNATURE_VALUE],
output_value=required_values[cls._KEY_OUTPUT_VALUE],
status_code=required_values[cls._KEY_STATUS_CODE],
)
|
def from_json(cls, input_json: str) -> 'NistBeaconValue':
"""
Convert a string of JSON which represents a NIST randomness beacon
value into a 'NistBeaconValue' object.
:param input_json: JSON to build a 'Nist RandomnessBeaconValue' from
:return: A 'NistBeaconValue' object, 'None' otherwise
"""
try:
data_dict = json.loads(input_json)
except ValueError:
return None
# Our required values are "must haves". This makes it simple
# to verify we loaded everything out of JSON correctly.
required_values = {
cls._KEY_FREQUENCY: None,
cls._KEY_OUTPUT_VALUE: None,
cls._KEY_PREVIOUS_OUTPUT_VALUE: None,
cls._KEY_SEED_VALUE: None,
cls._KEY_SIGNATURE_VALUE: None,
cls._KEY_STATUS_CODE: None,
cls._KEY_TIMESTAMP: None,
cls._KEY_VERSION: None,
}
for key in required_values:
if key in data_dict:
required_values[key] = data_dict[key]
# Confirm that the required values are set, and not 'None'
if None in required_values.values():
return None
# We have all the required values, return a node object
return cls(
version=required_values[cls._KEY_VERSION],
frequency=int(required_values[cls._KEY_FREQUENCY]),
timestamp=int(required_values[cls._KEY_TIMESTAMP]),
seed_value=required_values[cls._KEY_SEED_VALUE],
previous_output_value=required_values[
cls._KEY_PREVIOUS_OUTPUT_VALUE
],
signature_value=required_values[cls._KEY_SIGNATURE_VALUE],
output_value=required_values[cls._KEY_OUTPUT_VALUE],
status_code=required_values[cls._KEY_STATUS_CODE],
)
|
[
"Convert",
"a",
"string",
"of",
"JSON",
"which",
"represents",
"a",
"NIST",
"randomness",
"beacon",
"value",
"into",
"a",
"NistBeaconValue",
"object",
"."
] |
urda/nistbeacon
|
python
|
https://github.com/urda/nistbeacon/blob/43e0c3d1e186e71387f072daf98911abb14469dd/nistbeacon/nistbeaconvalue.py#L321-L368
|
[
"def",
"from_json",
"(",
"cls",
",",
"input_json",
":",
"str",
")",
"->",
"'NistBeaconValue'",
":",
"try",
":",
"data_dict",
"=",
"json",
".",
"loads",
"(",
"input_json",
")",
"except",
"ValueError",
":",
"return",
"None",
"# Our required values are \"must haves\". This makes it simple",
"# to verify we loaded everything out of JSON correctly.",
"required_values",
"=",
"{",
"cls",
".",
"_KEY_FREQUENCY",
":",
"None",
",",
"cls",
".",
"_KEY_OUTPUT_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_PREVIOUS_OUTPUT_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_SEED_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_SIGNATURE_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_STATUS_CODE",
":",
"None",
",",
"cls",
".",
"_KEY_TIMESTAMP",
":",
"None",
",",
"cls",
".",
"_KEY_VERSION",
":",
"None",
",",
"}",
"for",
"key",
"in",
"required_values",
":",
"if",
"key",
"in",
"data_dict",
":",
"required_values",
"[",
"key",
"]",
"=",
"data_dict",
"[",
"key",
"]",
"# Confirm that the required values are set, and not 'None'",
"if",
"None",
"in",
"required_values",
".",
"values",
"(",
")",
":",
"return",
"None",
"# We have all the required values, return a node object",
"return",
"cls",
"(",
"version",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_VERSION",
"]",
",",
"frequency",
"=",
"int",
"(",
"required_values",
"[",
"cls",
".",
"_KEY_FREQUENCY",
"]",
")",
",",
"timestamp",
"=",
"int",
"(",
"required_values",
"[",
"cls",
".",
"_KEY_TIMESTAMP",
"]",
")",
",",
"seed_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_SEED_VALUE",
"]",
",",
"previous_output_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_PREVIOUS_OUTPUT_VALUE",
"]",
",",
"signature_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_SIGNATURE_VALUE",
"]",
",",
"output_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_OUTPUT_VALUE",
"]",
",",
"status_code",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_STATUS_CODE",
"]",
",",
")"
] |
43e0c3d1e186e71387f072daf98911abb14469dd
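
A round-trip sketch: the `json` property and value equality used below are both implied elsewhere in these records (get_first_record reads `.json`, and chain_check compares records with `==`); the top-level import is assumed.

from nistbeacon import NistBeacon, NistBeaconValue

record = NistBeacon.get_first_record(download=False)  # local copy, no network
copy = NistBeaconValue.from_json(record.json)
assert copy == record
assert NistBeaconValue.from_json('not json') is None  # bad input degrades to None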
|
test
|
NistBeaconValue.from_xml
|
Convert a string of XML which represents a NIST Randomness Beacon value
into a 'NistBeaconValue' object.
:param input_xml: XML to build a 'NistBeaconValue' from
:return: A 'NistBeaconValue' object, 'None' otherwise
|
nistbeacon/nistbeaconvalue.py
|
def from_xml(cls, input_xml: str) -> 'NistBeaconValue':
"""
Convert a string of XML which represents a NIST Randomness Beacon value
into a 'NistBeaconValue' object.
:param input_xml: XML to build a 'NistBeaconValue' from
:return: A 'NistBeaconValue' object, 'None' otherwise
"""
invalid_result = None
understood_namespaces = {
'nist-0.1': 'http://beacon.nist.gov/record/0.1/',
}
# Our required values are "must haves". This makes it simple
# to verify we loaded everything out of XML correctly.
required_values = {
cls._KEY_FREQUENCY: None,
cls._KEY_OUTPUT_VALUE: None,
cls._KEY_PREVIOUS_OUTPUT_VALUE: None,
cls._KEY_SEED_VALUE: None,
cls._KEY_SIGNATURE_VALUE: None,
cls._KEY_STATUS_CODE: None,
cls._KEY_TIMESTAMP: None,
cls._KEY_VERSION: None,
}
# First attempt to load the xml, return 'None' on ParseError
try:
tree = ElementTree.ElementTree(ElementTree.fromstring(input_xml))
except ElementTree.ParseError:
return invalid_result
# Using the required values, let's load the xml values in
for key in required_values:
discovered_element = tree.find(
"{0}:{1}".format('nist-0.1', key),
namespaces=understood_namespaces,
)
if not isinstance(discovered_element, ElementTree.Element):
continue
# Bad pylint message - https://github.com/PyCQA/pylint/issues/476
# pylint: disable=no-member
required_values[key] = discovered_element.text
# Confirm that the required values are set, and not 'None'
if None in required_values.values():
return invalid_result
# We have all the required values, return a node object
return cls(
version=required_values[cls._KEY_VERSION],
frequency=int(required_values[cls._KEY_FREQUENCY]),
timestamp=int(required_values[cls._KEY_TIMESTAMP]),
seed_value=required_values[cls._KEY_SEED_VALUE],
previous_output_value=required_values[
cls._KEY_PREVIOUS_OUTPUT_VALUE
],
signature_value=required_values[cls._KEY_SIGNATURE_VALUE],
output_value=required_values[cls._KEY_OUTPUT_VALUE],
status_code=required_values[cls._KEY_STATUS_CODE],
)
|
def from_xml(cls, input_xml: str) -> 'NistBeaconValue':
"""
Convert a string of XML which represents a NIST Randomness Beacon value
into a 'NistBeaconValue' object.
:param input_xml: XML to build a 'NistBeaconValue' from
:return: A 'NistBeaconValue' object, 'None' otherwise
"""
invalid_result = None
understood_namespaces = {
'nist-0.1': 'http://beacon.nist.gov/record/0.1/',
}
# Our required values are "must haves". This makes it simple
# to verify we loaded everything out of XML correctly.
required_values = {
cls._KEY_FREQUENCY: None,
cls._KEY_OUTPUT_VALUE: None,
cls._KEY_PREVIOUS_OUTPUT_VALUE: None,
cls._KEY_SEED_VALUE: None,
cls._KEY_SIGNATURE_VALUE: None,
cls._KEY_STATUS_CODE: None,
cls._KEY_TIMESTAMP: None,
cls._KEY_VERSION: None,
}
# First attempt to load the xml, return 'None' on ParseError
try:
tree = ElementTree.ElementTree(ElementTree.fromstring(input_xml))
except ElementTree.ParseError:
return invalid_result
# Using the required values, let's load the xml values in
for key in required_values:
discovered_element = tree.find(
"{0}:{1}".format('nist-0.1', key),
namespaces=understood_namespaces,
)
if not isinstance(discovered_element, ElementTree.Element):
continue
# Bad pylint message - https://github.com/PyCQA/pylint/issues/476
# pylint: disable=no-member
required_values[key] = discovered_element.text
# Confirm that the required values are set, and not 'None'
if None in required_values.values():
return invalid_result
# We have all the required values, return a node object
return cls(
version=required_values[cls._KEY_VERSION],
frequency=int(required_values[cls._KEY_FREQUENCY]),
timestamp=int(required_values[cls._KEY_TIMESTAMP]),
seed_value=required_values[cls._KEY_SEED_VALUE],
previous_output_value=required_values[
cls._KEY_PREVIOUS_OUTPUT_VALUE
],
signature_value=required_values[cls._KEY_SIGNATURE_VALUE],
output_value=required_values[cls._KEY_OUTPUT_VALUE],
status_code=required_values[cls._KEY_STATUS_CODE],
)
|
[
"Convert",
"a",
"string",
"of",
"XML",
"which",
"represents",
"a",
"NIST",
"Randomness",
"Beacon",
"value",
"into",
"a",
"NistBeaconValue",
"object",
"."
] |
urda/nistbeacon
|
python
|
https://github.com/urda/nistbeacon/blob/43e0c3d1e186e71387f072daf98911abb14469dd/nistbeacon/nistbeaconvalue.py#L371-L435
|
[
"def",
"from_xml",
"(",
"cls",
",",
"input_xml",
":",
"str",
")",
"->",
"'NistBeaconValue'",
":",
"invalid_result",
"=",
"None",
"understood_namespaces",
"=",
"{",
"'nist-0.1'",
":",
"'http://beacon.nist.gov/record/0.1/'",
",",
"}",
"# Our required values are \"must haves\". This makes it simple",
"# to verify we loaded everything out of XML correctly.",
"required_values",
"=",
"{",
"cls",
".",
"_KEY_FREQUENCY",
":",
"None",
",",
"cls",
".",
"_KEY_OUTPUT_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_PREVIOUS_OUTPUT_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_SEED_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_SIGNATURE_VALUE",
":",
"None",
",",
"cls",
".",
"_KEY_STATUS_CODE",
":",
"None",
",",
"cls",
".",
"_KEY_TIMESTAMP",
":",
"None",
",",
"cls",
".",
"_KEY_VERSION",
":",
"None",
",",
"}",
"# First attempt to load the xml, return 'None' on ParseError",
"try",
":",
"tree",
"=",
"ElementTree",
".",
"ElementTree",
"(",
"ElementTree",
".",
"fromstring",
"(",
"input_xml",
")",
")",
"except",
"ElementTree",
".",
"ParseError",
":",
"return",
"invalid_result",
"# Using the required values, let's load the xml values in",
"for",
"key",
"in",
"required_values",
":",
"discovered_element",
"=",
"tree",
".",
"find",
"(",
"\"{0}:{1}\"",
".",
"format",
"(",
"'nist-0.1'",
",",
"key",
")",
",",
"namespaces",
"=",
"understood_namespaces",
",",
")",
"if",
"not",
"isinstance",
"(",
"discovered_element",
",",
"ElementTree",
".",
"Element",
")",
":",
"continue",
"# Bad pylint message - https://github.com/PyCQA/pylint/issues/476",
"# pylint: disable=no-member",
"required_values",
"[",
"key",
"]",
"=",
"discovered_element",
".",
"text",
"# Confirm that the required values are set, and not 'None'",
"if",
"None",
"in",
"required_values",
".",
"values",
"(",
")",
":",
"return",
"invalid_result",
"# We have all the required values, return a node object",
"return",
"cls",
"(",
"version",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_VERSION",
"]",
",",
"frequency",
"=",
"int",
"(",
"required_values",
"[",
"cls",
".",
"_KEY_FREQUENCY",
"]",
")",
",",
"timestamp",
"=",
"int",
"(",
"required_values",
"[",
"cls",
".",
"_KEY_TIMESTAMP",
"]",
")",
",",
"seed_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_SEED_VALUE",
"]",
",",
"previous_output_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_PREVIOUS_OUTPUT_VALUE",
"]",
",",
"signature_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_SIGNATURE_VALUE",
"]",
",",
"output_value",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_OUTPUT_VALUE",
"]",
",",
"status_code",
"=",
"required_values",
"[",
"cls",
".",
"_KEY_STATUS_CODE",
"]",
",",
")"
] |
43e0c3d1e186e71387f072daf98911abb14469dd
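A quick way to exercise from_xml is to hand it a record built by hand. The sketch below is hypothetical: the element names are guesses at the _KEY_* constant values (which are not shown in this excerpt) and the field contents are invented, so on a real install it may simply return None; only the namespace URI is taken verbatim from the parser.

# Hypothetical round-trip; tag names and values are assumptions, not real beacon data.
from nistbeacon import NistBeaconValue

sample_xml = (
    '<record xmlns="http://beacon.nist.gov/record/0.1/">'
    '<version>Version 1.0</version>'
    '<frequency>60</frequency>'
    '<timeStamp>1447873020</timeStamp>'
    '<seedValue>AA</seedValue>'
    '<previousOutputValue>BB</previousOutputValue>'
    '<signatureValue>CC</signatureValue>'
    '<outputValue>DD</outputValue>'
    '<statusCode>0</statusCode>'
    '</record>'
)

value = NistBeaconValue.from_xml(sample_xml)
if value is None:
    print("parse failed: malformed XML or a required element was missing")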
|
test
|
MinifiedJsTemplateResponse.rendered_content
|
Returns a 'minified' version of the javascript content
|
djanalytics/views.py
|
def rendered_content(self):
"""Returns a 'minified' version of the javascript content"""
template = self.resolve_template(self.template_name)
if django.VERSION[1] < 8:
if template.name.endswith('.min'):
return super(MinifiedJsTemplateResponse, self).rendered_content
else:
if template.template.name.endswith('.min'):
return super(MinifiedJsTemplateResponse, self).rendered_content
# if no minified template exists, minify the response
content = super(MinifiedJsTemplateResponse, self).rendered_content
content = jsmin.jsmin(content)
return content
|
def rendered_content(self):
"""Returns a 'minified' version of the javascript content"""
template = self.resolve_template(self.template_name)
if django.VERSION[1] < 8:
if template.name.endswith('.min'):
return super(MinifiedJsTemplateResponse, self).rendered_content
else:
if template.template.name.endswith('.min'):
return super(MinifiedJsTemplateResponse, self).rendered_content
# if no minified template exists, minify the response
content = super(MinifiedJsTemplateResponse, self).rendered_content
content = jsmin.jsmin(content)
return content
|
[
"Returns",
"a",
"minified",
"version",
"of",
"the",
"javascript",
"content"
] |
analytehealth/django-analytics
|
python
|
https://github.com/analytehealth/django-analytics/blob/7782d3f81249dcb1b266afb0cb1e90000108c74d/djanalytics/views.py#L123-L135
|
[
"def",
"rendered_content",
"(",
"self",
")",
":",
"template",
"=",
"self",
".",
"resolve_template",
"(",
"self",
".",
"template_name",
")",
"if",
"django",
".",
"VERSION",
"[",
"1",
"]",
"<",
"8",
":",
"if",
"template",
".",
"name",
".",
"endswith",
"(",
"'.min'",
")",
":",
"return",
"super",
"(",
"MinifiedJsTemplateResponse",
",",
"self",
")",
".",
"rendered_content",
"else",
":",
"if",
"template",
".",
"template",
".",
"name",
".",
"endswith",
"(",
"'.min'",
")",
":",
"return",
"super",
"(",
"MinifiedJsTemplateResponse",
",",
"self",
")",
".",
"rendered_content",
"# if no minified template exists, minify the response",
"content",
"=",
"super",
"(",
"MinifiedJsTemplateResponse",
",",
"self",
")",
".",
"rendered_content",
"content",
"=",
"jsmin",
".",
"jsmin",
"(",
"content",
")",
"return",
"content"
] |
7782d3f81249dcb1b266afb0cb1e90000108c74d
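Two details worth noting above. The version guard inspects only django.VERSION[1], the minor number, so it implicitly assumes a Django 1.x series; and when no pre-minified template exists, the fallback is the jsmin package. That fallback call in isolation (the snippet content is arbitrary):

import jsmin  # pip install jsmin

source = """
// toggle a CSS class on click
function toggle(el) {
    el.classList.toggle('active');
}
"""
print(jsmin.jsmin(source))  # comments and insignificant whitespace stripped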
|
test
|
LogFollower.get_fn
|
Passes each parsed log line to `fn`
This is a better idea than storing a giant log file in memory
|
tensor/logs/follower.py
|
def get_fn(self, fn, max_lines=None):
"""Passes each parsed log line to `fn`
This is a better idea than storing a giant log file in memory
"""
stat = os.stat(self.logfile)
if (stat.st_ino == self.lastInode) and (stat.st_size == self.lastSize):
# Nothing new
return []
# Handle rollover and rotations vaguely
if (stat.st_ino != self.lastInode) or (stat.st_size < self.lastSize):
self.lastSize = 0
fi = open(self.logfile, 'rt')
fi.seek(self.lastSize)
self.lastInode = stat.st_ino
lines = 0
for i in fi:
lines += 1
if max_lines and (lines > max_lines):
self.storeLast()
fi.close()
return
if '\n' in i:
self.lastSize += len(i)
if self.parser:
line = self.parser(i.strip('\n'))
else:
line = i.strip('\n')
fn(line)
self.storeLast()
fi.close()
|
def get_fn(self, fn, max_lines=None):
"""Passes each parsed log line to `fn`
This is a better idea than storing a giant log file in memory
"""
stat = os.stat(self.logfile)
if (stat.st_ino == self.lastInode) and (stat.st_size == self.lastSize):
# Nothing new
return []
# Handle rollover and rotations vaguely
if (stat.st_ino != self.lastInode) or (stat.st_size < self.lastSize):
self.lastSize = 0
fi = open(self.logfile, 'rt')
fi.seek(self.lastSize)
self.lastInode = stat.st_ino
lines = 0
for i in fi:
lines += 1
if max_lines and (lines > max_lines):
self.storeLast()
fi.close()
return
if '\n' in i:
self.lastSize += len(i)
if self.parser:
line = self.parser(i.strip('\n'))
else:
line = i.strip('\n')
fn(line)
self.storeLast()
fi.close()
|
[
"Passes",
"each",
"parsed",
"log",
"line",
"to",
"fn",
"This",
"is",
"a",
"better",
"idea",
"than",
"storing",
"a",
"giant",
"log",
"file",
"in",
"memory"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/logs/follower.py#L47-L86
|
[
"def",
"get_fn",
"(",
"self",
",",
"fn",
",",
"max_lines",
"=",
"None",
")",
":",
"stat",
"=",
"os",
".",
"stat",
"(",
"self",
".",
"logfile",
")",
"if",
"(",
"stat",
".",
"st_ino",
"==",
"self",
".",
"lastInode",
")",
"and",
"(",
"stat",
".",
"st_size",
"==",
"self",
".",
"lastSize",
")",
":",
"# Nothing new",
"return",
"[",
"]",
"# Handle rollover and rotations vaguely",
"if",
"(",
"stat",
".",
"st_ino",
"!=",
"self",
".",
"lastInode",
")",
"or",
"(",
"stat",
".",
"st_size",
"<",
"self",
".",
"lastSize",
")",
":",
"self",
".",
"lastSize",
"=",
"0",
"fi",
"=",
"open",
"(",
"self",
".",
"logfile",
",",
"'rt'",
")",
"fi",
".",
"seek",
"(",
"self",
".",
"lastSize",
")",
"self",
".",
"lastInode",
"=",
"stat",
".",
"st_ino",
"lines",
"=",
"0",
"for",
"i",
"in",
"fi",
":",
"lines",
"+=",
"1",
"if",
"max_lines",
"and",
"(",
"lines",
">",
"max_lines",
")",
":",
"self",
".",
"storeLast",
"(",
")",
"fi",
".",
"close",
"(",
")",
"return",
"if",
"'\\n'",
"in",
"i",
":",
"self",
".",
"lastSize",
"+=",
"len",
"(",
"i",
")",
"if",
"self",
".",
"parser",
":",
"line",
"=",
"self",
".",
"parser",
"(",
"i",
".",
"strip",
"(",
"'\\n'",
")",
")",
"else",
":",
"line",
"=",
"i",
".",
"strip",
"(",
"'\\n'",
")",
"fn",
"(",
"line",
")",
"self",
".",
"storeLast",
"(",
")",
"fi",
".",
"close",
"(",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
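Typical use is to hand get_fn a callback so each line is handled as it streams in, rather than collecting the whole backlog with get(). A minimal sketch, assuming LogFollower takes the log path as its first constructor argument (check the class definition for the real signature):

from tensor.logs.follower import LogFollower

follower = LogFollower('/var/log/syslog')  # constructor args are an assumption

def handle(line):
    # invoked once per newly appended line; `line` is the parser output
    # when a parser was configured, otherwise the raw stripped string
    print(line)

follower.get_fn(handle, max_lines=1000)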
|
test
|
LogFollower.get
|
Returns a big list of all log lines since the last run
|
tensor/logs/follower.py
|
def get(self, max_lines=None):
"""Returns a big list of all log lines since the last run
"""
rows = []
self.get_fn(lambda row: rows.append(row), max_lines=max_lines)
return rows
|
def get(self, max_lines=None):
"""Returns a big list of all log lines since the last run
"""
rows = []
self.get_fn(lambda row: rows.append(row), max_lines=max_lines)
return rows
|
[
"Returns",
"a",
"big",
"list",
"of",
"all",
"log",
"lines",
"since",
"the",
"last",
"run"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/logs/follower.py#L88-L95
|
[
"def",
"get",
"(",
"self",
",",
"max_lines",
"=",
"None",
")",
":",
"rows",
"=",
"[",
"]",
"self",
".",
"get_fn",
"(",
"lambda",
"row",
":",
"rows",
".",
"append",
"(",
"row",
")",
",",
"max_lines",
"=",
"max_lines",
")",
"return",
"rows"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
TokenMixin.create_token
|
Create a token referencing the object id with extra data.
Note random data is added to ensure that no two tokens are identical.
|
zenodo_accessrequests/tokens.py
|
def create_token(self, obj_id, extra_data):
"""Create a token referencing the object id with extra data.
Note random data is added to ensure that no two tokens are identical.
"""
return self.dumps(
dict(
id=obj_id,
data=extra_data,
rnd=binascii.hexlify(os.urandom(4)).decode('utf-8')
)
)
|
def create_token(self, obj_id, extra_data):
"""Create a token referencing the object id with extra data.
Note random data is added to ensure that no two tokens are identical.
"""
return self.dumps(
dict(
id=obj_id,
data=extra_data,
rnd=binascii.hexlify(os.urandom(4)).decode('utf-8')
)
)
|
[
"Create",
"a",
"token",
"referencing",
"the",
"object",
"id",
"with",
"extra",
"data",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L42-L53
|
[
"def",
"create_token",
"(",
"self",
",",
"obj_id",
",",
"extra_data",
")",
":",
"return",
"self",
".",
"dumps",
"(",
"dict",
"(",
"id",
"=",
"obj_id",
",",
"data",
"=",
"extra_data",
",",
"rnd",
"=",
"binascii",
".",
"hexlify",
"(",
"os",
".",
"urandom",
"(",
"4",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
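TokenMixin relies on a dumps() supplied by whatever serializer it is mixed into. A self-contained sketch pairing it with itsdangerous.URLSafeSerializer (the serializer choice and the secret are illustrative assumptions, not how the package wires it up):

import binascii
import os

from itsdangerous import URLSafeSerializer

class TokenMixin(object):
    def create_token(self, obj_id, extra_data):
        return self.dumps(dict(
            id=obj_id,
            data=extra_data,
            rnd=binascii.hexlify(os.urandom(4)).decode('utf-8'),
        ))

class DemoSerializer(TokenMixin, URLSafeSerializer):
    pass

s = DemoSerializer('not-a-real-secret')
t1 = s.create_token(42, {'recid': 42})
t2 = s.create_token(42, {'recid': 42})
assert t1 != t2  # the random 'rnd' field keeps identical payloads from colliding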
|
test
|
TokenMixin.validate_token
|
Validate secret link token.
:param token: Token value.
:param expected_data: A dictionary of key/values that must be present
in the data part of the token (i.e. included via ``extra_data`` in
``create_token``).
|
zenodo_accessrequests/tokens.py
|
def validate_token(self, token, expected_data=None):
"""Validate secret link token.
:param token: Token value.
:param expected_data: A dictionary of key/values that must be present
in the data part of the token (i.e. included via ``extra_data`` in
``create_token``).
"""
try:
# Load token and remove random data.
data = self.load_token(token)
# Compare expected data with data in token.
if expected_data:
for k in expected_data:
if expected_data[k] != data["data"].get(k):
return None
return data
except BadData:
return None
|
def validate_token(self, token, expected_data=None):
"""Validate secret link token.
:param token: Token value.
:param expected_data: A dictionary of key/values that must be present
in the data part of the token (i.e. included via ``extra_data`` in
``create_token``).
"""
try:
# Load token and remove random data.
data = self.load_token(token)
# Compare expected data with data in token.
if expected_data:
for k in expected_data:
if expected_data[k] != data["data"].get(k):
return None
return data
except BadData:
return None
|
[
"Validate",
"secret",
"link",
"token",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L55-L74
|
[
"def",
"validate_token",
"(",
"self",
",",
"token",
",",
"expected_data",
"=",
"None",
")",
":",
"try",
":",
"# Load token and remove random data.",
"data",
"=",
"self",
".",
"load_token",
"(",
"token",
")",
"# Compare expected data with data in token.",
"if",
"expected_data",
":",
"for",
"k",
"in",
"expected_data",
":",
"if",
"expected_data",
"[",
"k",
"]",
"!=",
"data",
"[",
"\"data\"",
"]",
".",
"get",
"(",
"k",
")",
":",
"return",
"None",
"return",
"data",
"except",
"BadData",
":",
"return",
"None"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
TokenMixin.load_token
|
Load data in a token.
:param token: Token to load.
:param force: Load token data even if signature expired.
Default: False.
|
zenodo_accessrequests/tokens.py
|
def load_token(self, token, force=False):
"""Load data in a token.
:param token: Token to load.
:param force: Load token data even if signature expired.
Default: False.
"""
try:
data = self.loads(token)
except SignatureExpired as e:
if not force:
raise
data = e.payload
del data["rnd"]
return data
|
def load_token(self, token, force=False):
"""Load data in a token.
:param token: Token to load.
:param force: Load token data even if signature expired.
Default: False.
"""
try:
data = self.loads(token)
except SignatureExpired as e:
if not force:
raise
data = e.payload
del data["rnd"]
return data
|
[
"Load",
"data",
"in",
"a",
"token",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L76-L91
|
[
"def",
"load_token",
"(",
"self",
",",
"token",
",",
"force",
"=",
"False",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"loads",
"(",
"token",
")",
"except",
"SignatureExpired",
"as",
"e",
":",
"if",
"not",
"force",
":",
"raise",
"data",
"=",
"e",
".",
"payload",
"del",
"data",
"[",
"\"rnd\"",
"]",
"return",
"data"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
EncryptedTokenMixIn.engine
|
Get cryptographic engine.
|
zenodo_accessrequests/tokens.py
|
def engine(self):
"""Get cryptographic engine."""
if not hasattr(self, '_engine'):
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(current_app.config['SECRET_KEY'].encode('utf8'))
fernet_key = urlsafe_b64encode(digest.finalize())
self._engine = Fernet(fernet_key)
return self._engine
|
def engine(self):
"""Get cryptographic engine."""
if not hasattr(self, '_engine'):
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(current_app.config['SECRET_KEY'].encode('utf8'))
fernet_key = urlsafe_b64encode(digest.finalize())
self._engine = Fernet(fernet_key)
return self._engine
|
[
"Get",
"cryptographic",
"engine",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L98-L109
|
[
"def",
"engine",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_engine'",
")",
":",
"from",
"cryptography",
".",
"fernet",
"import",
"Fernet",
"from",
"cryptography",
".",
"hazmat",
".",
"backends",
"import",
"default_backend",
"from",
"cryptography",
".",
"hazmat",
".",
"primitives",
"import",
"hashes",
"digest",
"=",
"hashes",
".",
"Hash",
"(",
"hashes",
".",
"SHA256",
"(",
")",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"digest",
".",
"update",
"(",
"current_app",
".",
"config",
"[",
"'SECRET_KEY'",
"]",
".",
"encode",
"(",
"'utf8'",
")",
")",
"fernet_key",
"=",
"urlsafe_b64encode",
"(",
"digest",
".",
"finalize",
"(",
")",
")",
"self",
".",
"_engine",
"=",
"Fernet",
"(",
"fernet_key",
")",
"return",
"self",
".",
"_engine"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
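The derivation is: SHA-256 the application secret, then urlsafe-base64 the 32-byte digest, which is exactly the key shape Fernet expects. The same steps standalone, outside Flask (the secret is made up):

from base64 import urlsafe_b64encode

from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes

digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(b'not-a-real-secret-key')
engine = Fernet(urlsafe_b64encode(digest.finalize()))

token = engine.encrypt(b'payload')
assert engine.decrypt(token) == b'payload'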
|
test
|
EncryptedTokenMixIn.create_token
|
Create a token referencing the object id with extra data.
|
zenodo_accessrequests/tokens.py
|
def create_token(self, obj_id, extra_data):
"""Create a token referencing the object id with extra data."""
return self.engine.encrypt(
super(EncryptedTokenMixIn, self).create_token(obj_id, extra_data)
)
|
def create_token(self, obj_id, extra_data):
"""Create a token referencing the object id with extra data."""
return self.engine.encrypt(
super(EncryptedTokenMixIn, self).create_token(obj_id, extra_data)
)
|
[
"Create",
"a",
"token",
"referencing",
"the",
"object",
"id",
"with",
"extra",
"data",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L111-L115
|
[
"def",
"create_token",
"(",
"self",
",",
"obj_id",
",",
"extra_data",
")",
":",
"return",
"self",
".",
"engine",
".",
"encrypt",
"(",
"super",
"(",
"EncryptedTokenMixIn",
",",
"self",
")",
".",
"create_token",
"(",
"obj_id",
",",
"extra_data",
")",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
EncryptedTokenMixIn.load_token
|
Load data in a token.
:param token: Token to load.
:param force: Load token data even if signature expired.
Default: False.
|
zenodo_accessrequests/tokens.py
|
def load_token(self, token, force=False):
"""Load data in a token.
:param token: Token to load.
:param force: Load token data even if signature expired.
Default: False.
"""
return super(EncryptedTokenMixIn, self).load_token(
self.engine.decrypt(token), force=force
)
|
def load_token(self, token, force=False):
"""Load data in a token.
:param token: Token to load.
:param force: Load token data even if signature expired.
Default: False.
"""
return super(EncryptedTokenMixIn, self).load_token(
self.engine.decrypt(token), force=force
)
|
[
"Load",
"data",
"in",
"a",
"token",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L117-L126
|
[
"def",
"load_token",
"(",
"self",
",",
"token",
",",
"force",
"=",
"False",
")",
":",
"return",
"super",
"(",
"EncryptedTokenMixIn",
",",
"self",
")",
".",
"load_token",
"(",
"self",
".",
"engine",
".",
"decrypt",
"(",
"token",
")",
",",
"force",
"=",
"force",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
EmailConfirmationSerializer.compat_validate_token
|
Multiple algorithm-compatible token validation.
|
zenodo_accessrequests/tokens.py
|
def compat_validate_token(cls, *args, **kwargs):
"""Multiple algorithm-compatible token validation."""
data = None
for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
data = cls(algorithm_name=algorithm).validate_token(
*args, **kwargs)
if not data: # move to next algorithm
continue
return data
|
def compat_validate_token(cls, *args, **kwargs):
"""Multiple algorithm-compatible token validation."""
data = None
for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
data = cls(algorithm_name=algorithm).validate_token(
*args, **kwargs)
if not data: # move to next algorithm
continue
return data
|
[
"Multiple",
"algorithm",
"-",
"compatible",
"token",
"validation",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L151-L159
|
[
"def",
"compat_validate_token",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"None",
"for",
"algorithm",
"in",
"SUPPORTED_DIGEST_ALGORITHMS",
":",
"data",
"=",
"cls",
"(",
"algorithm_name",
"=",
"algorithm",
")",
".",
"validate_token",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"data",
":",
"# move to next algorithm",
"continue",
"return",
"data"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
SecretLinkFactory.create_token
|
Create the secret link token.
|
zenodo_accessrequests/tokens.py
|
def create_token(cls, obj_id, data, expires_at=None):
"""Create the secret link token."""
if expires_at:
s = TimedSecretLinkSerializer(expires_at=expires_at)
else:
s = SecretLinkSerializer()
return s.create_token(obj_id, data)
|
def create_token(cls, obj_id, data, expires_at=None):
"""Create the secret link token."""
if expires_at:
s = TimedSecretLinkSerializer(expires_at=expires_at)
else:
s = SecretLinkSerializer()
return s.create_token(obj_id, data)
|
[
"Create",
"the",
"secret",
"link",
"token",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L196-L203
|
[
"def",
"create_token",
"(",
"cls",
",",
"obj_id",
",",
"data",
",",
"expires_at",
"=",
"None",
")",
":",
"if",
"expires_at",
":",
"s",
"=",
"TimedSecretLinkSerializer",
"(",
"expires_at",
"=",
"expires_at",
")",
"else",
":",
"s",
"=",
"SecretLinkSerializer",
"(",
")",
"return",
"s",
".",
"create_token",
"(",
"obj_id",
",",
"data",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
SecretLinkFactory.validate_token
|
Validate a secret link token (non-expiring + expiring).
|
zenodo_accessrequests/tokens.py
|
def validate_token(cls, token, expected_data=None):
"""Validate a secret link token (non-expiring + expiring)."""
for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
s = SecretLinkSerializer(algorithm_name=algorithm)
st = TimedSecretLinkSerializer(algorithm_name=algorithm)
try:
for serializer in (s, st):
data = serializer.validate_token(
token, expected_data=expected_data)
if data:
return data
except SignatureExpired: # move to next algorithm
raise
except BadData:
continue
|
def validate_token(cls, token, expected_data=None):
"""Validate a secret link token (non-expiring + expiring)."""
for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
s = SecretLinkSerializer(algorithm_name=algorithm)
st = TimedSecretLinkSerializer(algorithm_name=algorithm)
try:
for serializer in (s, st):
data = serializer.validate_token(
token, expected_data=expected_data)
if data:
return data
except SignatureExpired: # move to next algorithm
raise
except BadData:
continue
|
[
"Validate",
"a",
"secret",
"link",
"token",
"(",
"non",
"-",
"expiring",
"+",
"expiring",
")",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L206-L221
|
[
"def",
"validate_token",
"(",
"cls",
",",
"token",
",",
"expected_data",
"=",
"None",
")",
":",
"for",
"algorithm",
"in",
"SUPPORTED_DIGEST_ALGORITHMS",
":",
"s",
"=",
"SecretLinkSerializer",
"(",
"algorithm_name",
"=",
"algorithm",
")",
"st",
"=",
"TimedSecretLinkSerializer",
"(",
"algorithm_name",
"=",
"algorithm",
")",
"try",
":",
"for",
"serializer",
"in",
"(",
"s",
",",
"st",
")",
":",
"data",
"=",
"serializer",
".",
"validate_token",
"(",
"token",
",",
"expected_data",
"=",
"expected_data",
")",
"if",
"data",
":",
"return",
"data",
"except",
"SignatureExpired",
":",
"# move to next algorithm",
"raise",
"except",
"BadData",
":",
"continue"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
SecretLinkFactory.load_token
|
Validate a secret link token (non-expiring + expiring).
|
zenodo_accessrequests/tokens.py
|
def load_token(cls, token, force=False):
"""Validate a secret link token (non-expiring + expiring)."""
for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
s = SecretLinkSerializer(algorithm_name=algorithm)
st = TimedSecretLinkSerializer(algorithm_name=algorithm)
for serializer in (s, st):
try:
data = serializer.load_token(token, force=force)
if data:
return data
except SignatureExpired:
raise # signature was parsed and is expired
except BadData:
continue
|
def load_token(cls, token, force=False):
"""Validate a secret link token (non-expiring + expiring)."""
for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
s = SecretLinkSerializer(algorithm_name=algorithm)
st = TimedSecretLinkSerializer(algorithm_name=algorithm)
for serializer in (s, st):
try:
data = serializer.load_token(token, force=force)
if data:
return data
except SignatureExpired:
raise # signature was parsed and is expired
except BadData:
continue
|
[
"Validate",
"a",
"secret",
"link",
"token",
"(",
"non",
"-",
"expiring",
"+",
"expiring",
")",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L224-L237
|
[
"def",
"load_token",
"(",
"cls",
",",
"token",
",",
"force",
"=",
"False",
")",
":",
"for",
"algorithm",
"in",
"SUPPORTED_DIGEST_ALGORITHMS",
":",
"s",
"=",
"SecretLinkSerializer",
"(",
"algorithm_name",
"=",
"algorithm",
")",
"st",
"=",
"TimedSecretLinkSerializer",
"(",
"algorithm_name",
"=",
"algorithm",
")",
"for",
"serializer",
"in",
"(",
"s",
",",
"st",
")",
":",
"try",
":",
"data",
"=",
"serializer",
".",
"load_token",
"(",
"token",
",",
"force",
"=",
"force",
")",
"if",
"data",
":",
"return",
"data",
"except",
"SignatureExpired",
":",
"raise",
"# signature was parsed and is expired",
"except",
"BadData",
":",
"continue"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
Counter32
|
32bit counter aggregator with wrapping
|
tensor/aggregators.py
|
def Counter32(a, b, delta):
"""32bit counter aggregator with wrapping
"""
if b < a:
c = 4294967295 - a
return (c + b) / float(delta)
return (b - a) / float(delta)
|
def Counter32(a, b, delta):
"""32bit counter aggregator with wrapping
"""
if b < a:
c = 4294967295 - a
return (c + b) / float(delta)
return (b - a) / float(delta)
|
[
"32bit",
"counter",
"aggregator",
"with",
"wrapping"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/aggregators.py#L1-L8
|
[
"def",
"Counter32",
"(",
"a",
",",
"b",
",",
"delta",
")",
":",
"if",
"b",
"<",
"a",
":",
"c",
"=",
"4294967295",
"-",
"a",
"return",
"(",
"c",
"+",
"b",
")",
"/",
"float",
"(",
"delta",
")",
"return",
"(",
"b",
"-",
"a",
")",
"/",
"float",
"(",
"delta",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
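Worked example of the wrap branch: if the counter read 4294967290 last interval and 5 now, the increment it reports is (4294967295 - 4294967290) + 5 = 10, so over a 10-second delta the rate is 1.0.

def Counter32(a, b, delta):
    if b < a:  # counter wrapped past 2**32 - 1
        return ((4294967295 - a) + b) / float(delta)
    return (b - a) / float(delta)

print(Counter32(4294967290, 5, 10))  # 1.0  (wrapped)
print(Counter32(100, 250, 10))       # 15.0 (no wrap)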
|
test
|
Counter64
|
64bit counter aggregator with wrapping
|
tensor/aggregators.py
|
def Counter64(a, b, delta):
"""64bit counter aggregator with wrapping
"""
if b < a:
c = 18446744073709551615 - a
return (c + b) / float(delta)
return (b - a) / float(delta)
|
def Counter64(a, b, delta):
"""64bit counter aggregator with wrapping
"""
if b < a:
c = 18446744073709551615 - a
return (c + b) / float(delta)
return (b - a) / float(delta)
|
[
"64bit",
"counter",
"aggregator",
"with",
"wrapping"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/aggregators.py#L10-L17
|
[
"def",
"Counter64",
"(",
"a",
",",
"b",
",",
"delta",
")",
":",
"if",
"b",
"<",
"a",
":",
"c",
"=",
"18446744073709551615",
"-",
"a",
"return",
"(",
"c",
"+",
"b",
")",
"/",
"float",
"(",
"delta",
")",
"return",
"(",
"b",
"-",
"a",
")",
"/",
"float",
"(",
"delta",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
Counter
|
Counter derivative
|
tensor/aggregators.py
|
def Counter(a, b, delta):
"""Counter derivative
"""
if b < a:
return None
return (b - a) / float(delta)
|
def Counter(a, b, delta):
"""Counter derivative
"""
if b < a:
return None
return (b - a) / float(delta)
|
[
"Counter",
"derivative"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/aggregators.py#L19-L25
|
[
"def",
"Counter",
"(",
"a",
",",
"b",
",",
"delta",
")",
":",
"if",
"b",
"<",
"a",
":",
"return",
"None",
"return",
"(",
"b",
"-",
"a",
")",
"/",
"float",
"(",
"delta",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
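Unlike the fixed-width variants, plain Counter cannot distinguish a wrap from a reset, so any decrease yields None instead of a bogus rate:

def Counter(a, b, delta):
    if b < a:  # counter went backwards: reset (or wrap) - no safe rate
        return None
    return (b - a) / float(delta)

print(Counter(500, 200, 10))  # None
print(Counter(200, 500, 10))  # 30.0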
|
test
|
average_duration
|
Method to calculate and format an average duration safely
|
djanalytics/reports/utils.py
|
def average_duration(total_duration, visits):
""" Method to calculate and format an average duration safely """
if not visits:
seconds = 0
else:
seconds = int(round(total_duration / Decimal(visits)))
duration = timedelta(seconds=seconds)
return str(duration)
|
def average_duration(total_duration, visits):
""" Method to calculate and format an average duration safely """
if not visits:
seconds = 0
else:
seconds = int(round(total_duration / Decimal(visits)))
duration = timedelta(seconds=seconds)
return str(duration)
|
[
"Method",
"to",
"calculate",
"and",
"format",
"an",
"average",
"duration",
"safely"
] |
analytehealth/django-analytics
|
python
|
https://github.com/analytehealth/django-analytics/blob/7782d3f81249dcb1b266afb0cb1e90000108c74d/djanalytics/reports/utils.py#L12-L19
|
[
"def",
"average_duration",
"(",
"total_duration",
",",
"visits",
")",
":",
"if",
"not",
"visits",
":",
"seconds",
"=",
"0",
"else",
":",
"seconds",
"=",
"int",
"(",
"round",
"(",
"total_duration",
"/",
"Decimal",
"(",
"visits",
")",
")",
")",
"duration",
"=",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"return",
"str",
"(",
"duration",
")"
] |
7782d3f81249dcb1b266afb0cb1e90000108c74d
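Worked example: 3700 total seconds across 2 visits rounds to 1850 seconds, which timedelta renders as '0:30:50'; zero visits short-circuits to '0:00:00' rather than dividing by zero.

from datetime import timedelta
from decimal import Decimal

def average_duration(total_duration, visits):
    seconds = int(round(total_duration / Decimal(visits))) if visits else 0
    return str(timedelta(seconds=seconds))

print(average_duration(3700, 2))  # 0:30:50
print(average_duration(3700, 0))  # 0:00:00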
|
test
|
TensorService.setupOutputs
|
Setup output processors
|
tensor/service.py
|
def setupOutputs(self, config):
"""Setup output processors"""
if self.proto == 'tcp':
defaultOutput = {
'output': 'tensor.outputs.riemann.RiemannTCP',
'server': self.server,
'port': self.port
}
else:
defaultOutput = {
'output': 'tensor.outputs.riemann.RiemannUDP',
'server': self.server,
'port': self.port
}
outputs = config.get('outputs', [defaultOutput])
for output in outputs:
if not ('debug' in output):
output['debug'] = self.debug
cl = output['output'].split('.')[-1] # class
path = '.'.join(output['output'].split('.')[:-1]) # import path
# Import the module and construct the output object
outputObj = getattr(
importlib.import_module(path), cl)(output, self)
name = output.get('name', None)
# Add the output to our routing hash
if name in self.outputs:
self.outputs[name].append(outputObj)
else:
self.outputs[name] = [outputObj]
# connect the output
reactor.callLater(0, outputObj.createClient)
|
def setupOutputs(self, config):
"""Setup output processors"""
if self.proto == 'tcp':
defaultOutput = {
'output': 'tensor.outputs.riemann.RiemannTCP',
'server': self.server,
'port': self.port
}
else:
defaultOutput = {
'output': 'tensor.outputs.riemann.RiemannUDP',
'server': self.server,
'port': self.port
}
outputs = config.get('outputs', [defaultOutput])
for output in outputs:
if not ('debug' in output):
output['debug'] = self.debug
cl = output['output'].split('.')[-1] # class
path = '.'.join(output['output'].split('.')[:-1]) # import path
# Import the module and construct the output object
outputObj = getattr(
importlib.import_module(path), cl)(output, self)
name = output.get('name', None)
# Add the output to our routing hash
if name in self.outputs:
self.outputs[name].append(outputObj)
else:
self.outputs[name] = [outputObj]
# connect the output
reactor.callLater(0, outputObj.createClient)
|
[
"Setup",
"output",
"processors"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/service.py#L92-L130
|
[
"def",
"setupOutputs",
"(",
"self",
",",
"config",
")",
":",
"if",
"self",
".",
"proto",
"==",
"'tcp'",
":",
"defaultOutput",
"=",
"{",
"'output'",
":",
"'tensor.outputs.riemann.RiemannTCP'",
",",
"'server'",
":",
"self",
".",
"server",
",",
"'port'",
":",
"self",
".",
"port",
"}",
"else",
":",
"defaultOutput",
"=",
"{",
"'output'",
":",
"'tensor.outputs.riemann.RiemannUDP'",
",",
"'server'",
":",
"self",
".",
"server",
",",
"'port'",
":",
"self",
".",
"port",
"}",
"outputs",
"=",
"config",
".",
"get",
"(",
"'outputs'",
",",
"[",
"defaultOutput",
"]",
")",
"for",
"output",
"in",
"outputs",
":",
"if",
"not",
"(",
"'debug'",
"in",
"output",
")",
":",
"output",
"[",
"'debug'",
"]",
"=",
"self",
".",
"debug",
"cl",
"=",
"output",
"[",
"'output'",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"# class",
"path",
"=",
"'.'",
".",
"join",
"(",
"output",
"[",
"'output'",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
")",
"# import path",
"# Import the module and construct the output object",
"outputObj",
"=",
"getattr",
"(",
"importlib",
".",
"import_module",
"(",
"path",
")",
",",
"cl",
")",
"(",
"output",
",",
"self",
")",
"name",
"=",
"output",
".",
"get",
"(",
"'name'",
",",
"None",
")",
"# Add the output to our routing hash",
"if",
"name",
"in",
"self",
".",
"outputs",
":",
"self",
".",
"outputs",
"[",
"name",
"]",
".",
"append",
"(",
"outputObj",
")",
"else",
":",
"self",
".",
"outputs",
"[",
"name",
"]",
"=",
"[",
"outputObj",
"]",
"# connect the output",
"reactor",
".",
"callLater",
"(",
"0",
",",
"outputObj",
".",
"createClient",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
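The load-by-dotted-path idiom in the middle is worth isolating: split the class name off the configured string, import the module, then getattr the class. The same mechanism, demonstrated on a stdlib class so the sketch actually runs:

import importlib

def load_class(dotted_path):
    module_path, _, class_name = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_path), class_name)

cls = load_class('collections.OrderedDict')
print(cls([('a', 1)]))  # OrderedDict([('a', 1)])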
|
test
|
TensorService.setupSources
|
Sets up source objects from the given config
|
tensor/service.py
|
def setupSources(self, config):
"""Sets up source objects from the given config"""
sources = config.get('sources', [])
for source in sources:
src = self.createSource(source)
self.setupTriggers(source, src)
self.sources.append(src)
|
def setupSources(self, config):
"""Sets up source objects from the given config"""
sources = config.get('sources', [])
for source in sources:
src = self.createSource(source)
self.setupTriggers(source, src)
self.sources.append(src)
|
[
"Sets",
"up",
"source",
"objects",
"from",
"the",
"given",
"config"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/service.py#L168-L176
|
[
"def",
"setupSources",
"(",
"self",
",",
"config",
")",
":",
"sources",
"=",
"config",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
"for",
"source",
"in",
"sources",
":",
"src",
"=",
"self",
".",
"createSource",
"(",
"source",
")",
"self",
".",
"setupTriggers",
"(",
"source",
",",
"src",
")",
"self",
".",
"sources",
".",
"append",
"(",
"src",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
TensorService.sendEvent
|
Callback that all event sources call when they have a new event
or list of events
|
tensor/service.py
|
def sendEvent(self, source, events):
"""Callback that all event sources call when they have a new event
or list of events
"""
if isinstance(events, list):
self.eventCounter += len(events)
else:
self.eventCounter += 1
events = [events]
queue = self._aggregateQueue(events)
if queue:
if (source in self.critical) or (source in self.warn):
self.setStates(source, queue)
self.routeEvent(source, queue)
queue = []
self.lastEvents[source] = time.time()
|
def sendEvent(self, source, events):
"""Callback that all event sources call when they have a new event
or list of events
"""
if isinstance(events, list):
self.eventCounter += len(events)
else:
self.eventCounter += 1
events = [events]
queue = self._aggregateQueue(events)
if queue:
if (source in self.critical) or (source in self.warn):
self.setStates(source, queue)
self.routeEvent(source, queue)
queue = []
self.lastEvents[source] = time.time()
|
[
"Callback",
"that",
"all",
"event",
"sources",
"call",
"when",
"they",
"have",
"a",
"new",
"event",
"or",
"list",
"of",
"events"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/service.py#L234-L255
|
[
"def",
"sendEvent",
"(",
"self",
",",
"source",
",",
"events",
")",
":",
"if",
"isinstance",
"(",
"events",
",",
"list",
")",
":",
"self",
".",
"eventCounter",
"+=",
"len",
"(",
"events",
")",
"else",
":",
"self",
".",
"eventCounter",
"+=",
"1",
"events",
"=",
"[",
"events",
"]",
"queue",
"=",
"self",
".",
"_aggregateQueue",
"(",
"events",
")",
"if",
"queue",
":",
"if",
"(",
"source",
"in",
"self",
".",
"critical",
")",
"or",
"(",
"source",
"in",
"self",
".",
"warn",
")",
":",
"self",
".",
"setStates",
"(",
"source",
",",
"queue",
")",
"self",
".",
"routeEvent",
"(",
"source",
",",
"queue",
")",
"queue",
"=",
"[",
"]",
"self",
".",
"lastEvents",
"[",
"source",
"]",
"=",
"time",
".",
"time",
"(",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
TensorService.sourceWatchdog
|
Watchdog timer function.
Recreates sources which have not generated events in 10*interval if
they have watchdog set to true in their configuration
|
tensor/service.py
|
def sourceWatchdog(self):
"""Watchdog timer function.
Recreates sources which have not generated events in 10*interval if
they have watchdog set to true in their configuration
"""
for i, source in enumerate(self.sources):
if not source.config.get('watchdog', False):
continue
sn = repr(source)
last = self.lastEvents.get(source, None)
if last:
try:
if last < (time.time()-(source.inter*10)):
log.msg("Trying to restart stale source %s: %ss" % (
sn, int(time.time() - last)
))
s = self.sources.pop(i)
try:
s.t.stop()
except Exception as e:
log.msg("Could not stop timer for %s: %s" % (
sn, e))
config = copy.deepcopy(s.config)
del self.lastEvents[source]
del s, source
source = self.createSource(config)
reactor.callLater(0, self._startSource, source)
except Exception as e:
log.msg("Could not reset source %s: %s" % (
sn, e))
|
def sourceWatchdog(self):
"""Watchdog timer function.
Recreates sources which have not generated events in 10*interval if
they have watchdog set to true in their configuration
"""
for i, source in enumerate(self.sources):
if not source.config.get('watchdog', False):
continue
sn = repr(source)
last = self.lastEvents.get(source, None)
if last:
try:
if last < (time.time()-(source.inter*10)):
log.msg("Trying to restart stale source %s: %ss" % (
sn, int(time.time() - last)
))
s = self.sources.pop(i)
try:
s.t.stop()
except Exception as e:
log.msg("Could not stop timer for %s: %s" % (
sn, e))
config = copy.deepcopy(s.config)
del self.lastEvents[source]
del s, source
source = self.createSource(config)
reactor.callLater(0, self._startSource, source)
except Exception as e:
log.msg("Could not reset source %s: %s" % (
sn, e))
|
[
"Watchdog",
"timer",
"function",
"."
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/service.py#L285-L320
|
[
"def",
"sourceWatchdog",
"(",
"self",
")",
":",
"for",
"i",
",",
"source",
"in",
"enumerate",
"(",
"self",
".",
"sources",
")",
":",
"if",
"not",
"source",
".",
"config",
".",
"get",
"(",
"'watchdog'",
",",
"False",
")",
":",
"continue",
"sn",
"=",
"repr",
"(",
"source",
")",
"last",
"=",
"self",
".",
"lastEvents",
".",
"get",
"(",
"source",
",",
"None",
")",
"if",
"last",
":",
"try",
":",
"if",
"last",
"<",
"(",
"time",
".",
"time",
"(",
")",
"-",
"(",
"source",
".",
"inter",
"*",
"10",
")",
")",
":",
"log",
".",
"msg",
"(",
"\"Trying to restart stale source %s: %ss\"",
"%",
"(",
"sn",
",",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"last",
")",
")",
")",
"s",
"=",
"self",
".",
"sources",
".",
"pop",
"(",
"i",
")",
"try",
":",
"s",
".",
"t",
".",
"stop",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"msg",
"(",
"\"Could not stop timer for %s: %s\"",
"%",
"(",
"sn",
",",
"e",
")",
")",
"config",
"=",
"copy",
".",
"deepcopy",
"(",
"s",
".",
"config",
")",
"del",
"self",
".",
"lastEvents",
"[",
"source",
"]",
"del",
"s",
",",
"source",
"source",
"=",
"self",
".",
"createSource",
"(",
"config",
")",
"reactor",
".",
"callLater",
"(",
"0",
",",
"self",
".",
"_startSource",
",",
"source",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"msg",
"(",
"\"Could not reset source %s: %s\"",
"%",
"(",
"sn",
",",
"e",
")",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
ApacheLogParser._parse_format
|
Converts the input format to a regular
expression, as well as extracting fields
Raises an exception if it couldn't compile
the generated regex.
|
tensor/logs/parsers.py
|
def _parse_format(self, format):
"""
Converts the input format to a regular
expression, as well as extracting fields
Raises an exception if it couldn't compile
the generated regex.
"""
format = format.strip()
format = re.sub('[ \t]+',' ',format)
subpatterns = []
findquotes = re.compile(r'^\\"')
findreferreragent = re.compile('Referer|User-Agent')
findpercent = re.compile('^%.*t$')
lstripquotes = re.compile(r'^\\"')
rstripquotes = re.compile(r'\\"$')
header = re.compile(r'.*%\{([^\}]+)\}i')
for element in format.split(' '):
hasquotes = 0
if findquotes.search(element): hasquotes = 1
if hasquotes:
element = lstripquotes.sub('', element)
element = rstripquotes.sub('', element)
head = header.match(element)
if head:
self._names.append(head.groups()[0].lower())
self._types.append(str)
else:
self._names.append(self.alias(element))
self._types.append(self.types.get(element, [None, str])[1])
subpattern = '(\S*)'
if hasquotes:
if element == '%r' or findreferreragent.search(element):
subpattern = r'\"([^"\\]*(?:\\.[^"\\]*)*)\"'
else:
subpattern = r'\"([^\"]*)\"'
elif findpercent.search(element):
subpattern = r'(\[[^\]]+\])'
elif element == '%U':
subpattern = '(.+?)'
subpatterns.append(subpattern)
self._pattern = '^' + ' '.join(subpatterns) + '$'
try:
self._regex = re.compile(self._pattern)
except Exception as e:
raise ApacheLogParserError(e)
|
def _parse_format(self, format):
"""
Converts the input format to a regular
expression, as well as extracting fields
Raises an exception if it couldn't compile
the generated regex.
"""
format = format.strip()
format = re.sub('[ \t]+',' ',format)
subpatterns = []
findquotes = re.compile(r'^\\"')
findreferreragent = re.compile('Referer|User-Agent')
findpercent = re.compile('^%.*t$')
lstripquotes = re.compile(r'^\\"')
rstripquotes = re.compile(r'\\"$')
header = re.compile(r'.*%\{([^\}]+)\}i')
for element in format.split(' '):
hasquotes = 0
if findquotes.search(element): hasquotes = 1
if hasquotes:
element = lstripquotes.sub('', element)
element = rstripquotes.sub('', element)
head = header.match(element)
if head:
self._names.append(head.groups()[0].lower())
self._types.append(str)
else:
self._names.append(self.alias(element))
self._types.append(self.types.get(element, [None, str])[1])
subpattern = '(\S*)'
if hasquotes:
if element == '%r' or findreferreragent.search(element):
subpattern = r'\"([^"\\]*(?:\\.[^"\\]*)*)\"'
else:
subpattern = r'\"([^\"]*)\"'
elif findpercent.search(element):
subpattern = r'(\[[^\]]+\])'
elif element == '%U':
subpattern = '(.+?)'
subpatterns.append(subpattern)
self._pattern = '^' + ' '.join(subpatterns) + '$'
try:
self._regex = re.compile(self._pattern)
except Exception as e:
raise ApacheLogParserError(e)
|
[
"Converts",
"the",
"input",
"format",
"to",
"a",
"regular",
"expression",
"as",
"well",
"as",
"extracting",
"fields"
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/logs/parsers.py#L65-L122
|
[
"def",
"_parse_format",
"(",
"self",
",",
"format",
")",
":",
"format",
"=",
"format",
".",
"strip",
"(",
")",
"format",
"=",
"re",
".",
"sub",
"(",
"'[ \\t]+'",
",",
"' '",
",",
"format",
")",
"subpatterns",
"=",
"[",
"]",
"findquotes",
"=",
"re",
".",
"compile",
"(",
"r'^\\\\\"'",
")",
"findreferreragent",
"=",
"re",
".",
"compile",
"(",
"'Referer|User-Agent'",
")",
"findpercent",
"=",
"re",
".",
"compile",
"(",
"'^%.*t$'",
")",
"lstripquotes",
"=",
"re",
".",
"compile",
"(",
"r'^\\\\\"'",
")",
"rstripquotes",
"=",
"re",
".",
"compile",
"(",
"r'\\\\\"$'",
")",
"header",
"=",
"re",
".",
"compile",
"(",
"r'.*%\\{([^\\}]+)\\}i'",
")",
"for",
"element",
"in",
"format",
".",
"split",
"(",
"' '",
")",
":",
"hasquotes",
"=",
"0",
"if",
"findquotes",
".",
"search",
"(",
"element",
")",
":",
"hasquotes",
"=",
"1",
"if",
"hasquotes",
":",
"element",
"=",
"lstripquotes",
".",
"sub",
"(",
"''",
",",
"element",
")",
"element",
"=",
"rstripquotes",
".",
"sub",
"(",
"''",
",",
"element",
")",
"head",
"=",
"header",
".",
"match",
"(",
"element",
")",
"if",
"head",
":",
"self",
".",
"_names",
".",
"append",
"(",
"head",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
"self",
".",
"_types",
".",
"append",
"(",
"str",
")",
"else",
":",
"self",
".",
"_names",
".",
"append",
"(",
"self",
".",
"alias",
"(",
"element",
")",
")",
"self",
".",
"_types",
".",
"append",
"(",
"self",
".",
"types",
".",
"get",
"(",
"element",
",",
"[",
"None",
",",
"str",
"]",
")",
"[",
"1",
"]",
")",
"subpattern",
"=",
"'(\\S*)'",
"if",
"hasquotes",
":",
"if",
"element",
"==",
"'%r'",
"or",
"findreferreragent",
".",
"search",
"(",
"element",
")",
":",
"subpattern",
"=",
"r'\\\"([^\"\\\\]*(?:\\\\.[^\"\\\\]*)*)\\\"'",
"else",
":",
"subpattern",
"=",
"r'\\\"([^\\\"]*)\\\"'",
"elif",
"findpercent",
".",
"search",
"(",
"element",
")",
":",
"subpattern",
"=",
"r'(\\[[^\\]]+\\])'",
"elif",
"element",
"==",
"'%U'",
":",
"subpattern",
"=",
"'(.+?)'",
"subpatterns",
".",
"append",
"(",
"subpattern",
")",
"self",
".",
"_pattern",
"=",
"'^'",
"+",
"' '",
".",
"join",
"(",
"subpatterns",
")",
"+",
"'$'",
"try",
":",
"self",
".",
"_regex",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"_pattern",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ApacheLogParserError",
"(",
"e",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
|
test
|
ApacheLogParser.parse
|
Parses a single line from the log file and returns
a dictionary of its contents.
Raises an exception if it couldn't parse the line
|
tensor/logs/parsers.py
|
def parse(self, line):
"""
Parses a single line from the log file and returns
    a dictionary of its contents.
    Raises an exception if it couldn't parse the line
"""
line = line.strip()
match = self._regex.match(line)
if match:
data = {}
for i, e in enumerate(match.groups()):
if e == "-":
k, v = self._names[i], None
else:
k, v = self._names[i], self._types[i](e)
data[k] = v
return data
raise ApacheLogParserError("Unable to parse: %s" % line)
|
def parse(self, line):
"""
Parses a single line from the log file and returns
    a dictionary of its contents.
    Raises an exception if it couldn't parse the line
"""
line = line.strip()
match = self._regex.match(line)
if match:
data = {}
for i, e in enumerate(match.groups()):
if e == "-":
k, v = self._names[i], None
else:
k, v = self._names[i], self._types[i](e)
data[k] = v
return data
raise ApacheLogParserError("Unable to parse: %s" % line)
|
[
"Parses",
"a",
"single",
"line",
"from",
"the",
"log",
"file",
"and",
"returns",
"a",
"dictionary",
"of",
"it",
"s",
"contents",
"."
] |
calston/tensor
|
python
|
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/logs/parsers.py#L124-L144
|
[
"def",
"parse",
"(",
"self",
",",
"line",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"match",
"=",
"self",
".",
"_regex",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"data",
"=",
"{",
"}",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"match",
".",
"groups",
"(",
")",
")",
":",
"if",
"e",
"==",
"\"-\"",
":",
"k",
",",
"v",
"=",
"self",
".",
"_names",
"[",
"i",
"]",
",",
"None",
"else",
":",
"k",
",",
"v",
"=",
"self",
".",
"_names",
"[",
"i",
"]",
",",
"self",
".",
"_types",
"[",
"i",
"]",
"(",
"e",
")",
"data",
"[",
"k",
"]",
"=",
"v",
"return",
"data",
"raise",
"ApacheLogParserError",
"(",
"\"Unable to parse: %s\"",
"%",
"line",
")"
] |
7c0c99708b5dbff97f3895f705e11996b608549d
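A hedged usage sketch: the constructor is assumed to take the Apache LogFormat string (that is what drives _parse_format above), and the log line is invented. Field names in the returned dict come from the parser's own aliases, so the sketch only prints the keys.

from tensor.logs.parsers import ApacheLogParser  # constructor arg is an assumption

parser = ApacheLogParser(r'%h %l %u %t \"%r\" %>s %b')
line = ('127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
        '"GET /apache_pb.gif HTTP/1.0" 200 2326')
data = parser.parse(line)
print(sorted(data))  # the aliased names for host, user, time, request, status, bytes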
|
test
|
validate_expires_at
|
Validate that date is in the future.
|
zenodo_accessrequests/forms.py
|
def validate_expires_at(form, field):
"""Validate that date is in the future."""
if form.accept.data:
if not field.data or datetime.utcnow().date() >= field.data:
raise validators.StopValidation(_(
"Please provide a future date."
))
if not field.data or \
datetime.utcnow().date() + timedelta(days=365) < field.data:
raise validators.StopValidation(_(
"Please provide a date no more than 1 year into the future."
))
|
def validate_expires_at(form, field):
"""Validate that date is in the future."""
if form.accept.data:
if not field.data or datetime.utcnow().date() >= field.data:
raise validators.StopValidation(_(
"Please provide a future date."
))
if not field.data or \
datetime.utcnow().date() + timedelta(days=365) < field.data:
raise validators.StopValidation(_(
"Please provide a date no more than 1 year into the future."
))
|
[
"Validate",
"that",
"date",
"is",
"in",
"the",
"future",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/forms.py#L39-L50
|
[
"def",
"validate_expires_at",
"(",
"form",
",",
"field",
")",
":",
"if",
"form",
".",
"accept",
".",
"data",
":",
"if",
"not",
"field",
".",
"data",
"or",
"datetime",
".",
"utcnow",
"(",
")",
".",
"date",
"(",
")",
">=",
"field",
".",
"data",
":",
"raise",
"validators",
".",
"StopValidation",
"(",
"_",
"(",
"\"Please provide a future date.\"",
")",
")",
"if",
"not",
"field",
".",
"data",
"or",
"datetime",
".",
"utcnow",
"(",
")",
".",
"date",
"(",
")",
"+",
"timedelta",
"(",
"days",
"=",
"365",
")",
"<",
"field",
".",
"data",
":",
"raise",
"validators",
".",
"StopValidation",
"(",
"_",
"(",
"\"Please provide a date no more than 1 year into the future.\"",
")",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
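The accepted window is therefore strictly after today and at most 365 days out. The same check with the WTForms plumbing stripped away:

from datetime import datetime, timedelta

def expires_at_ok(d):
    today = datetime.utcnow().date()
    return today < d <= today + timedelta(days=365)

today = datetime.utcnow().date()
print(expires_at_ok(today))                        # False (must be in the future)
print(expires_at_ok(today + timedelta(days=30)))   # True
print(expires_at_ok(today + timedelta(days=400)))  # False (more than a year out)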
|
test
|
ApprovalForm.validate_accept
|
Validate that accept has not been set.
|
zenodo_accessrequests/forms.py
|
def validate_accept(form, field):
"""Validate that accept have not been set."""
if field.data and form.reject.data:
raise validators.ValidationError(
_("Both reject and accept cannot be set at the same time.")
)
|
def validate_accept(form, field):
"""Validate that accept have not been set."""
if field.data and form.reject.data:
raise validators.ValidationError(
_("Both reject and accept cannot be set at the same time.")
)
|
[
"Validate",
"that",
"accept",
"have",
"not",
"been",
"set",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/forms.py#L105-L110
|
[
"def",
"validate_accept",
"(",
"form",
",",
"field",
")",
":",
"if",
"field",
".",
"data",
"and",
"form",
".",
"reject",
".",
"data",
":",
"raise",
"validators",
".",
"ValidationError",
"(",
"_",
"(",
"\"Both reject and accept cannot be set at the same time.\"",
")",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
ApprovalForm.validate_reject
|
Validate that accept has not been set.
|
zenodo_accessrequests/forms.py
|
def validate_reject(form, field):
"""Validate that accept have not been set."""
if field.data and form.accept.data:
raise validators.ValidationError(
_("Both reject and accept cannot be set at the same time.")
)
|
def validate_reject(form, field):
"""Validate that accept have not been set."""
if field.data and form.accept.data:
raise validators.ValidationError(
_("Both reject and accept cannot be set at the same time.")
)
|
[
"Validate",
"that",
"accept",
"have",
"not",
"been",
"set",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/forms.py#L112-L117
|
[
"def",
"validate_reject",
"(",
"form",
",",
"field",
")",
":",
"if",
"field",
".",
"data",
"and",
"form",
".",
"accept",
".",
"data",
":",
"raise",
"validators",
".",
"ValidationError",
"(",
"_",
"(",
"\"Both reject and accept cannot be set at the same time.\"",
")",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
ApprovalForm.validate_message
|
Validate message.
|
zenodo_accessrequests/forms.py
|
def validate_message(form, field):
"""Validate message."""
if form.reject.data and not field.data.strip():
raise validators.ValidationError(
_("You are required to provide message to the requester when"
" you reject a request.")
)
|
def validate_message(form, field):
"""Validate message."""
if form.reject.data and not field.data.strip():
raise validators.ValidationError(
_("You are required to provide message to the requester when"
" you reject a request.")
)
|
[
"Validate",
"message",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/forms.py#L119-L125
|
[
"def",
"validate_message",
"(",
"form",
",",
"field",
")",
":",
"if",
"form",
".",
"reject",
".",
"data",
"and",
"not",
"field",
".",
"data",
".",
"strip",
"(",
")",
":",
"raise",
"validators",
".",
"ValidationError",
"(",
"_",
"(",
"\"You are required to provide message to the requester when\"",
"\" you reject a request.\"",
")",
")"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
verify_token
|
Verify token and save in session if it's valid.
|
zenodo_accessrequests/ext.py
|
def verify_token():
"""Verify token and save in session if it's valid."""
try:
from .models import SecretLink
token = request.args['token']
# if the token is valid
if token and SecretLink.validate_token(token, {}):
# then save in session the token
session['accessrequests-secret-token'] = token
except KeyError:
pass
|
def verify_token():
"""Verify token and save in session if it's valid."""
try:
from .models import SecretLink
token = request.args['token']
# if the token is valid
if token and SecretLink.validate_token(token, {}):
# then save in session the token
session['accessrequests-secret-token'] = token
except KeyError:
pass
|
[
"Verify",
"token",
"and",
"save",
"in",
"session",
"if",
"it",
"s",
"valid",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/ext.py#L34-L44
|
[
"def",
"verify_token",
"(",
")",
":",
"try",
":",
"from",
".",
"models",
"import",
"SecretLink",
"token",
"=",
"request",
".",
"args",
"[",
"'token'",
"]",
"# if the token is valid",
"if",
"token",
"and",
"SecretLink",
".",
"validate_token",
"(",
"token",
",",
"{",
"}",
")",
":",
"# then save in session the token",
"session",
"[",
"'accessrequests-secret-token'",
"]",
"=",
"token",
"except",
"KeyError",
":",
"pass"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
ZenodoAccessRequests.init_app
|
Flask application initialization.
|
zenodo_accessrequests/ext.py
|
def init_app(self, app):
"""Flask application initialization."""
app.before_request(verify_token)
self.init_config(app)
state = _AppState(app=app)
app.extensions['zenodo-accessrequests'] = state
|
def init_app(self, app):
"""Flask application initialization."""
app.before_request(verify_token)
self.init_config(app)
state = _AppState(app=app)
app.extensions['zenodo-accessrequests'] = state
|
[
"Flask",
"application",
"initialization",
"."
] |
zenodo/zenodo-accessrequests
|
python
|
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/ext.py#L64-L69
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"app",
".",
"before_request",
"(",
"verify_token",
")",
"self",
".",
"init_config",
"(",
"app",
")",
"state",
"=",
"_AppState",
"(",
"app",
"=",
"app",
")",
"app",
".",
"extensions",
"[",
"'zenodo-accessrequests'",
"]",
"=",
"state"
] |
ce2cf3f1425d02ba4f3ad3202cfca43a1892558a
|
test
|
Device.name
|
Return a basic meaningful name based on device type
|
djanalytics/reports/models.py
|
def name(self):
""" Return a basic meaningful name based on device type """
if (
self.device_type and
self.device_type.code in (DeviceType.MOBILE, DeviceType.TABLET)
):
return self.device
else:
return self.browser
|
def name(self):
""" Return a basic meaningful name based on device type """
if (
self.device_type and
self.device_type.code in (DeviceType.MOBILE, DeviceType.TABLET)
):
return self.device
else:
return self.browser
|
[
"Return",
"a",
"basic",
"meaningful",
"name",
"based",
"on",
"device",
"type"
] |
analytehealth/django-analytics
|
python
|
https://github.com/analytehealth/django-analytics/blob/7782d3f81249dcb1b266afb0cb1e90000108c74d/djanalytics/reports/models.py#L123-L131
|
[
"def",
"name",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"device_type",
"and",
"self",
".",
"device_type",
".",
"code",
"in",
"(",
"DeviceType",
".",
"MOBILE",
",",
"DeviceType",
".",
"TABLET",
")",
")",
":",
"return",
"self",
".",
"device",
"else",
":",
"return",
"self",
".",
"browser"
] |
7782d3f81249dcb1b266afb0cb1e90000108c74d
|