repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
deepglo | deepglo-master/run_scripts/run_pems.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import random
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
if b:
return "true"
else:
return "false"
Ymat = np.load("./datasets/pems.npy")
print(Ymat.shape)
vbsize = 128 ## vertical batch size
hbsize = 256 ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 32, 1] ## number of channels for local model
num_channels_Y = [16, 16, 16, 16, 16, 1] ## number of channels for hybrid model
kernel_size = 7 ## kernel size for local models
dropout = 0.1 ## dropout during training
rank = 64 ## rank of global model
kernel_size_Y = 7 ## kernel size of hybrid model
lr = 0.0005 ## learning rate
val_len = 24 ## validation length
end_index = Ymat.shape[1] - 160 * 9 ## models will not look beyond this during training
start_date = "2012-5-1" ## start date time for the time-series
freq = "5T" ## frequency of data
covariates = None ## no covariates specified
use_time = True ## use time covariates
dti = None ## no specified time covariates (using default)
svd = True ## factor matrices are initialized by NMF
period = None ## periodicity not specified here (set a value such as 24 if known)
y_iters = 300 ## max. number of iterations while training Tconv models
init_epochs = 100 ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
stream=sys.stdout,
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
DG = DeepGLO(
Ymat,
vbsize=vbsize,
hbsize=hbsize,
num_channels_X=num_channels_X,
num_channels_Y=num_channels_Y,
kernel_size=kernel_size,
dropout=dropout,
rank=rank,
kernel_size_Y=kernel_size_Y,
lr=lr,
val_len=val_len,
end_index=end_index,
normalize=normalize,
start_date=start_date,
freq=freq,
covariates=covariates,
use_time=use_time,
dti=dti,
svd=svd,
period=period,
forward_cov=forward_cov,
)
DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
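    # Rolling validation over the held-out tail: tau * n = 9 * 160 time steps,
    # i.e. the same 160 * 9 window excluded from training via end_index above.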
result_dic = DG.rolling_validation(
Ymat=Ymat, tau=9, n=160, bsize=100, cpu=False, alpha=0.3
)
print(result_dic)
out_path = Path(
".",
"results",
"result_dictionary_pems_" + bool2str(normalize) + ".pkl",
)
    with open(out_path, "wb") as f:
        pickle.dump(result_dic, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--normalize",
type=str2bool,
required=True,
help="normalize for training or not",
)
args = parser.parse_args()
    normalize = args.normalize
main(args)
| 3,544 | 25.259259 | 88 | py |
deepglo | deepglo-master/run_scripts/__init__.py | # Implement your code here.
| 28 | 13.5 | 27 | py |
deepglo | deepglo-master/run_scripts/run_electricity.py | #### OS and command-line arguments
import sys
import multiprocessing as mp
import gzip
import subprocess
from pathlib import Path
import argparse
import logging
import os
sys.path.append('./')
#### DeepGLO model imports
from DeepGLO.metrics import *
from DeepGLO.DeepGLO import *
from DeepGLO.LocalModel import *
import pandas as pd
import numpy as np
import pickle
import json
import random
np.random.seed(111)
torch.cuda.manual_seed(111)
torch.manual_seed(111)
random.seed(111)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def bool2str(b):
if b:
return "true"
else:
return "false"
Ymat = np.load("./datasets/electricity.npy")
vbsize = 128 ## vertical batch size
hbsize = 256 ## horizontal batch size
num_channels_X = [32, 32, 32, 32, 32, 1] ## number of channels for local model
num_channels_Y = [32, 32, 32, 32, 32, 1] ## number of channels for hybrid model
kernel_size = 7 ## kernel size for local models
dropout = 0.2 ## dropout during training
rank = 64 ## rank of global model
kernel_size_Y = 7 ## kernel size of hybrid model
lr = 0.0005 ## learning rate
val_len = 24 ## validation length
end_index = Ymat.shape[1] - 24 * 7 ## models will not look beyond this during training
start_date = "2012-1-1" ## start date time for the time-series
freq = "H" ## frequency of data
covariates = None ## no covariates specified
use_time = True ## use time covariates
dti = None ## no specified time covariates (using default)
svd = True ## factor matrices are initialized by NMF
period = 24 ## periodicity of 24 is expected, leave it out if not known
y_iters = 300 ## max. number of iterations while training Tconv models
init_epochs = 100 ## max number of iterations while initializing factors
forward_cov = False
logging.basicConfig(
stream=sys.stdout,
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(args):
DG = DeepGLO(
Ymat,
vbsize=vbsize,
hbsize=hbsize,
num_channels_X=num_channels_X,
num_channels_Y=num_channels_Y,
kernel_size=kernel_size,
dropout=dropout,
rank=rank,
kernel_size_Y=kernel_size_Y,
lr=lr,
val_len=val_len,
end_index=end_index,
normalize=normalize,
start_date=start_date,
freq=freq,
covariates=covariates,
use_time=use_time,
dti=dti,
svd=svd,
period=period,
forward_cov=forward_cov,
)
DG.train_all_models(y_iters=y_iters, init_epochs=init_epochs)
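    # Rolling validation over the held-out tail: tau * n = 24 * 7 time steps
    # (one week of hourly data), matching end_index above.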
result_dic = DG.rolling_validation(
Ymat=Ymat, tau=24, n=7, bsize=100, cpu=False, alpha=0.3
)
print(result_dic)
out_path = Path("./results",
"result_dictionary_electricity_" + bool2str(normalize) + ".pkl",
)
    with open(out_path, "wb") as f:
        pickle.dump(result_dic, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--normalize",
type=str2bool,
required=True,
help="normalize for training or not",
)
args = parser.parse_args()
    normalize = args.normalize
main(args)
| 3,478 | 25.157895 | 87 | py |
deepglo | deepglo-master/datasets/reshape_data.py | import numpy as np
# Transpose so the array matches the (series x time) layout the run scripts
# expect, then overwrite the file in place (running this twice undoes it).
traffic = np.load('./traffic.npy')
traffic = traffic.transpose()
np.save('./traffic.npy', traffic)
| 123 | 10.272727 | 34 | py |
jinja | jinja-main/examples/basic/test.py | from jinja2 import Environment
from jinja2.loaders import DictLoader
env = Environment(
loader=DictLoader(
{
"child.html": """\
{% extends default_layout or 'default.html' %}
{% import 'helpers.html' as helpers %}
{% macro get_the_answer() %}42{% endmacro %}
{% set title = 'Hello World' %}
{% block body %}
{{ get_the_answer() }}
{{ helpers.conspirate() }}
{% endblock %}
""",
"default.html": """\
<!doctype html>
<title>{{ title }}</title>
{% block body %}{% endblock %}
""",
"helpers.html": """\
{% macro conspirate() %}23{% endmacro %}
""",
}
)
)
tmpl = env.get_template("child.html")
print(tmpl.render())
| 675 | 21.533333 | 46 | py |
jinja | jinja-main/examples/basic/debugger.py | from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
env = Environment(loader=FileSystemLoader("templates"))
tmpl = env.get_template("broken.html")
print(tmpl.render(seq=[3, 2, 4, 5, 3, 2, 0, 2, 1]))
| 223 | 31 | 55 | py |
jinja | jinja-main/examples/basic/translate.py | from jinja2 import Environment
env = Environment(extensions=["jinja2.ext.i18n"])
env.globals["gettext"] = {"Hello %(user)s!": "Hallo %(user)s!"}.__getitem__
env.globals["ngettext"] = lambda s, p, n: {
"%(count)s user": "%(count)d Benutzer",
"%(count)s users": "%(count)d Benutzer",
}[s if n == 1 else p]
print(
env.from_string(
"""\
{% trans %}Hello {{ user }}!{% endtrans %}
{% trans count=users|count -%}
{{ count }} user{% pluralize %}{{ count }} users
{% endtrans %}
"""
).render(user="someone", users=[1, 2, 3])
)
| 544 | 27.684211 | 75 | py |
jinja | jinja-main/examples/basic/cycle.py | from jinja2 import Environment
env = Environment(
line_statement_prefix="#", variable_start_string="${", variable_end_string="}"
)
print(
env.from_string(
"""\
<ul>
# for item in range(10)
<li class="${loop.cycle('odd', 'even')}">${item}</li>
# endfor
</ul>\
"""
).render()
)
| 301 | 16.764706 | 82 | py |
jinja | jinja-main/examples/basic/test_loop_filter.py | from jinja2 import Environment
tmpl = Environment().from_string(
"""\
<ul>
{%- for item in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] if item % 2 == 0 %}
<li>{{ loop.index }} / {{ loop.length }}: {{ item }}</li>
{%- endfor %}
</ul>
if condition: {{ 1 if foo else 0 }}
"""
)
print(tmpl.render(foo=True))
| 301 | 20.571429 | 67 | py |
jinja | jinja-main/examples/basic/inheritance.py | from jinja2 import Environment
from jinja2.loaders import DictLoader
env = Environment(
loader=DictLoader(
{
"a": "[A[{% block body %}{% endblock %}]]",
"b": "{% extends 'a' %}{% block body %}[B]{% endblock %}",
"c": "{% extends 'b' %}{% block body %}###{{ super() }}###{% endblock %}",
}
)
)
print(env.get_template("c").render())
| 392 | 27.071429 | 86 | py |
jinja | jinja-main/examples/basic/test_filter_and_linestatements.py | from jinja2 import Environment
env = Environment(
line_statement_prefix="%", variable_start_string="${", variable_end_string="}"
)
tmpl = env.from_string(
"""\
% macro foo()
${caller(42)}
% endmacro
<ul>
% for item in seq
<li>${item}</li>
% endfor
</ul>
% call(var) foo()
[${var}]
% endcall
% filter escape
<hello world>
% for item in [1, 2, 3]
- ${item}
% endfor
% endfilter
"""
)
print(tmpl.render(seq=range(10)))
| 456 | 15.321429 | 82 | py |
jinja | jinja-main/src/jinja2/visitor.py | """API for traversing the AST nodes. Implemented by the compiler and
meta introspection.
"""
import typing as t
from .nodes import Node
if t.TYPE_CHECKING:
import typing_extensions as te
class VisitCallable(te.Protocol):
def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
...
class NodeVisitor:
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
    By default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
return getattr(self, f"visit_{type(node).__name__}", None)
def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Called if no explicit visitor function exists for a node."""
for child_node in node.iter_child_nodes():
self.visit(child_node, *args, **kwargs)
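# A minimal sketch (illustrative) of the dispatch rule described in the class
# docstring: a visitor that counts Name nodes, delegating recursion to
# generic_visit so child nodes are still walked.
#
#     class NameCounter(NodeVisitor):
#         def __init__(self) -> None:
#             self.count = 0
#
#         def visit_Name(self, node: Node, *args: t.Any, **kwargs: t.Any) -> None:
#             self.count += 1
#             self.generic_visit(node, *args, **kwargs)
#
#     counter = NameCounter()
#     counter.visit(some_template_ast)  # `some_template_ast` built elsewhere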
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
return [rv]
return rv
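# A minimal sketch (illustrative) of the replace/remove semantics described in
# the class docstring: upper-case every TemplateData node and drop empty ones
# by returning None.
#
#     from .nodes import TemplateData
#
#     class Upcase(NodeTransformer):
#         def visit_TemplateData(self, node: TemplateData) -> t.Optional[TemplateData]:
#             if not node.data:
#                 return None  # removed from the parent's child list
#             node.data = node.data.upper()
#             return node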
| 3,568 | 37.376344 | 84 | py |
jinja | jinja-main/src/jinja2/parser.py | """Parse tokens from the lexer into nodes for the compiler."""
import typing
import typing as t
from . import nodes
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .lexer import describe_token
from .lexer import describe_token_expr
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
_ImportInclude = t.TypeVar("_ImportInclude", nodes.Import, nodes.Include)
_MacroCall = t.TypeVar("_MacroCall", nodes.Macro, nodes.CallBlock)
_statement_keywords = frozenset(
[
"for",
"if",
"block",
"extends",
"print",
"macro",
"include",
"from",
"import",
"set",
"with",
"autoescape",
]
)
_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
_math_nodes: t.Dict[str, t.Type[nodes.Expr]] = {
"add": nodes.Add,
"sub": nodes.Sub,
"mul": nodes.Mul,
"div": nodes.Div,
"floordiv": nodes.FloorDiv,
"mod": nodes.Mod,
}
class Parser:
"""This is the central parsing class Jinja uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(
self,
environment: "Environment",
source: str,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
state: t.Optional[str] = None,
) -> None:
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions: t.Dict[
str, t.Callable[["Parser"], t.Union[nodes.Node, t.List[nodes.Node]]]
] = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack: t.List[str] = []
self._end_token_stack: t.List[t.Tuple[str, ...]] = []
def fail(
self,
msg: str,
lineno: t.Optional[int] = None,
exc: t.Type[TemplateSyntaxError] = TemplateSyntaxError,
) -> "te.NoReturn":
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(
self,
name: t.Optional[str],
end_token_stack: t.List[t.Tuple[str, ...]],
lineno: t.Optional[int],
) -> "te.NoReturn":
expected: t.Set[str] = set()
for exprs in end_token_stack:
expected.update(map(describe_token_expr, exprs))
if end_token_stack:
currently_looking: t.Optional[str] = " or ".join(
map(repr, map(describe_token_expr, end_token_stack[-1]))
)
else:
currently_looking = None
if name is None:
message = ["Unexpected end of template."]
else:
message = [f"Encountered unknown tag {name!r}."]
if currently_looking:
if name is not None and name in expected:
message.append(
"You probably made a nesting mistake. Jinja is expecting this tag,"
f" but currently looking for {currently_looking}."
)
else:
message.append(
f"Jinja was looking for the following tags: {currently_looking}."
)
if self._tag_stack:
message.append(
"The innermost block that needs to be closed is"
f" {self._tag_stack[-1]!r}."
)
self.fail(" ".join(message), lineno)
def fail_unknown_tag(
self, name: str, lineno: t.Optional[int] = None
) -> "te.NoReturn":
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(
self,
end_tokens: t.Optional[t.Tuple[str, ...]] = None,
lineno: t.Optional[int] = None,
) -> "te.NoReturn":
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(
self, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None
) -> bool:
"""Are we at the end of a tuple?"""
if self.stream.current.type in ("variable_end", "block_end", "rparen"):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules) # type: ignore
return False
def free_identifier(self, lineno: t.Optional[int] = None) -> nodes.InternalName:
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, f"fi{self._last_identifier}", lineno=lineno)
return rv
def parse_statement(self) -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""Parse a single statement."""
token = self.stream.current
if token.type != "name":
self.fail("tag name expected", token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
f = getattr(self, f"parse_{self.stream.current.value}")
return f() # type: ignore
if token.value == "call":
return self.parse_call_block()
if token.value == "filter":
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(
self, end_tokens: t.Tuple[str, ...], drop_needle: bool = False
) -> t.List[nodes.Node]:
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
        for the block end and parses until one of the `end_tokens` is
        reached. By default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if("colon")
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect("block_end")
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == "eof":
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self) -> t.Union[nodes.Assign, nodes.AssignBlock]:
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if("assign"):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(("name:endset",), drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self) -> nodes.For:
"""Parse a for loop."""
lineno = self.stream.expect("name:for").lineno
target = self.parse_assign_target(extra_end_rules=("name:in",))
self.stream.expect("name:in")
iter = self.parse_tuple(
with_condexpr=False, extra_end_rules=("name:recursive",)
)
test = None
if self.stream.skip_if("name:if"):
test = self.parse_expression()
recursive = self.stream.skip_if("name:recursive")
body = self.parse_statements(("name:endfor", "name:else"))
if next(self.stream).value == "endfor":
else_ = []
else:
else_ = self.parse_statements(("name:endfor",), drop_needle=True)
return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
def parse_if(self) -> nodes.If:
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
while True:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test("name:elif"):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test("name:else"):
result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
break
return result
def parse_with(self) -> nodes.With:
node = nodes.With(lineno=next(self.stream).lineno)
targets: t.List[nodes.Expr] = []
values: t.List[nodes.Expr] = []
while self.stream.current.type != "block_end":
if targets:
self.stream.expect("comma")
target = self.parse_assign_target()
target.set_ctx("param")
targets.append(target)
self.stream.expect("assign")
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(("name:endwith",), drop_needle=True)
return node
def parse_autoescape(self) -> nodes.Scope:
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [nodes.Keyword("autoescape", self.parse_expression())]
node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
return nodes.Scope([node])
def parse_block(self) -> nodes.Block:
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect("name").value
node.scoped = self.stream.skip_if("name:scoped")
node.required = self.stream.skip_if("name:required")
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == "sub":
self.fail(
"Block names in Jinja have to be valid Python identifiers and may not"
" contain hyphens, use an underscore instead."
)
node.body = self.parse_statements(("name:endblock",), drop_needle=True)
# enforce that required blocks only contain whitespace or comments
# by asserting that the body, if not empty, is just TemplateData nodes
# with whitespace data
if node.required:
for body_node in node.body:
if not isinstance(body_node, nodes.Output) or any(
not isinstance(output_node, nodes.TemplateData)
or not output_node.data.isspace()
for output_node in body_node.nodes
):
self.fail("Required blocks can only contain comments or whitespace")
self.stream.skip_if("name:" + node.name)
return node
def parse_extends(self) -> nodes.Extends:
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(
self, node: _ImportInclude, default: bool
) -> _ImportInclude:
if self.stream.current.test_any(
"name:with", "name:without"
) and self.stream.look().test("name:context"):
node.with_context = next(self.stream).value == "with"
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self) -> nodes.Include:
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test("name:ignore") and self.stream.look().test(
"name:missing"
):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self) -> nodes.Import:
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect("name:as")
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self) -> nodes.FromImport:
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect("name:import")
node.names = []
def parse_context() -> bool:
if self.stream.current.value in {
"with",
"without",
} and self.stream.look().test("name:context"):
node.with_context = next(self.stream).value == "with"
self.stream.skip()
return True
return False
while True:
if node.names:
self.stream.expect("comma")
if self.stream.current.type == "name":
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith("_"):
self.fail(
"names starting with an underline can not be imported",
target.lineno,
exc=TemplateAssertionError,
)
if self.stream.skip_if("name:as"):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != "comma":
break
else:
self.stream.expect("name")
if not hasattr(node, "with_context"):
node.with_context = False
return node
def parse_signature(self, node: _MacroCall) -> None:
args = node.args = []
defaults = node.defaults = []
self.stream.expect("lparen")
while self.stream.current.type != "rparen":
if args:
self.stream.expect("comma")
arg = self.parse_assign_target(name_only=True)
arg.set_ctx("param")
if self.stream.skip_if("assign"):
defaults.append(self.parse_expression())
elif defaults:
self.fail("non-default argument follows default argument")
args.append(arg)
self.stream.expect("rparen")
def parse_call_block(self) -> nodes.CallBlock:
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == "lparen":
self.parse_signature(node)
else:
node.args = []
node.defaults = []
call_node = self.parse_expression()
if not isinstance(call_node, nodes.Call):
self.fail("expected call", node.lineno)
node.call = call_node
node.body = self.parse_statements(("name:endcall",), drop_needle=True)
return node
def parse_filter_block(self) -> nodes.FilterBlock:
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True) # type: ignore
node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
return node
def parse_macro(self) -> nodes.Macro:
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
return node
def parse_print(self) -> nodes.Output:
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != "block_end":
if node.nodes:
self.stream.expect("comma")
node.nodes.append(self.parse_expression())
return node
@typing.overload
def parse_assign_target(
self, with_tuple: bool = ..., name_only: "te.Literal[True]" = ...
) -> nodes.Name:
...
@typing.overload
def parse_assign_target(
self,
with_tuple: bool = True,
name_only: bool = False,
extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
with_namespace: bool = False,
) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]:
...
def parse_assign_target(
self,
with_tuple: bool = True,
name_only: bool = False,
extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
with_namespace: bool = False,
) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]:
"""Parse an assignment target. As Jinja allows assignments to
        tuples, this function can parse all allowed assignment targets. By
        default assignments to tuples are parsed; that can be disabled by
        setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
target: nodes.Expr
if with_namespace and self.stream.look().type == "dot":
token = self.stream.expect("name")
next(self.stream) # dot
attr = self.stream.expect("name")
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect("name")
target = nodes.Name(token.value, "store", lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(
simplified=True, extra_end_rules=extra_end_rules
)
else:
target = self.parse_primary()
target.set_ctx("store")
if not target.can_assign():
self.fail(
f"can't assign to {type(target).__name__.lower()!r}", target.lineno
)
return target # type: ignore
def parse_expression(self, with_condexpr: bool = True) -> nodes.Expr:
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self) -> nodes.Expr:
lineno = self.stream.current.lineno
expr1 = self.parse_or()
expr3: t.Optional[nodes.Expr]
while self.stream.skip_if("name:if"):
expr2 = self.parse_or()
if self.stream.skip_if("name:else"):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if("name:or"):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if("name:and"):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self) -> nodes.Expr:
if self.stream.current.test("name:not"):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self) -> nodes.Expr:
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
while True:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
elif self.stream.skip_if("name:in"):
ops.append(nodes.Operand("in", self.parse_math1()))
elif self.stream.current.test("name:not") and self.stream.look().test(
"name:in"
):
self.stream.skip(2)
ops.append(nodes.Operand("notin", self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_math1(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ("add", "sub"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self) -> nodes.Expr:
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == "tilde":
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_math2(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self) -> nodes.Expr:
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == "pow":
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter: bool = True) -> nodes.Expr:
token_type = self.stream.current.type
lineno = self.stream.current.lineno
node: nodes.Expr
if token_type == "sub":
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == "add":
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self) -> nodes.Expr:
token = self.stream.current
node: nodes.Expr
if token.type == "name":
if token.value in ("true", "false", "True", "False"):
node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
elif token.value in ("none", "None"):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, "load", lineno=token.lineno)
next(self.stream)
elif token.type == "string":
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == "string":
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const("".join(buf), lineno=lineno)
elif token.type in ("integer", "float"):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == "lparen":
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect("rparen")
elif token.type == "lbracket":
node = self.parse_list()
elif token.type == "lbrace":
node = self.parse_dict()
else:
self.fail(f"unexpected {describe_token(token)!r}", token.lineno)
return node
def parse_tuple(
self,
simplified: bool = False,
with_condexpr: bool = True,
extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
explicit_parentheses: bool = False,
) -> t.Union[nodes.Tuple, nodes.Expr]:
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a regular (single) expression instead
        of a tuple if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
        only names and literals are parsed. The `with_condexpr` parameter is
        forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
def parse() -> nodes.Expr:
return self.parse_expression(with_condexpr=False)
args: t.List[nodes.Expr] = []
is_tuple = False
while True:
if args:
self.stream.expect("comma")
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == "comma":
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail(
"Expected an expression,"
f" got {describe_token(self.stream.current)!r}"
)
return nodes.Tuple(args, "load", lineno=lineno)
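    # Illustrative outcomes of the rules above (e.g. via Environment().parse):
    #   {{ 1, 2 }}   -> Tuple([Const(1), Const(2)], 'load')  (implicit tuple)
    #   {{ (1,) }}   -> single-item Tuple via the trailing comma
    #   {{ () }}     -> empty Tuple, valid only with explicit parentheses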
def parse_list(self) -> nodes.List:
token = self.stream.expect("lbracket")
items: t.List[nodes.Expr] = []
while self.stream.current.type != "rbracket":
if items:
self.stream.expect("comma")
if self.stream.current.type == "rbracket":
break
items.append(self.parse_expression())
self.stream.expect("rbracket")
return nodes.List(items, lineno=token.lineno)
def parse_dict(self) -> nodes.Dict:
token = self.stream.expect("lbrace")
items: t.List[nodes.Pair] = []
while self.stream.current.type != "rbrace":
if items:
self.stream.expect("comma")
if self.stream.current.type == "rbrace":
break
key = self.parse_expression()
self.stream.expect("colon")
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect("rbrace")
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node: nodes.Expr) -> nodes.Expr:
while True:
token_type = self.stream.current.type
if token_type == "dot" or token_type == "lbracket":
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == "lparen":
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node: nodes.Expr) -> nodes.Expr:
while True:
token_type = self.stream.current.type
if token_type == "pipe":
node = self.parse_filter(node) # type: ignore
elif token_type == "name" and self.stream.current.value == "is":
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == "lparen":
node = self.parse_call(node)
else:
break
return node
def parse_subscript(
self, node: nodes.Expr
) -> t.Union[nodes.Getattr, nodes.Getitem]:
token = next(self.stream)
arg: nodes.Expr
if token.type == "dot":
attr_token = self.stream.current
next(self.stream)
if attr_token.type == "name":
return nodes.Getattr(
node, attr_token.value, "load", lineno=token.lineno
)
elif attr_token.type != "integer":
self.fail("expected name or number", attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, "load", lineno=token.lineno)
if token.type == "lbracket":
args: t.List[nodes.Expr] = []
while self.stream.current.type != "rbracket":
if args:
self.stream.expect("comma")
args.append(self.parse_subscribed())
self.stream.expect("rbracket")
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, "load", lineno=token.lineno)
return nodes.Getitem(node, arg, "load", lineno=token.lineno)
self.fail("expected subscript expression", token.lineno)
def parse_subscribed(self) -> nodes.Expr:
lineno = self.stream.current.lineno
args: t.List[t.Optional[nodes.Expr]]
if self.stream.current.type == "colon":
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != "colon":
return node
next(self.stream)
args = [node]
if self.stream.current.type == "colon":
args.append(None)
elif self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == "colon":
next(self.stream)
if self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call_args(
self,
) -> t.Tuple[
t.List[nodes.Expr],
t.List[nodes.Keyword],
t.Union[nodes.Expr, None],
t.Union[nodes.Expr, None],
]:
token = self.stream.expect("lparen")
args = []
kwargs = []
dyn_args = None
dyn_kwargs = None
require_comma = False
def ensure(expr: bool) -> None:
if not expr:
self.fail("invalid syntax for function call expression", token.lineno)
while self.stream.current.type != "rparen":
if require_comma:
self.stream.expect("comma")
# support for trailing comma
if self.stream.current.type == "rparen":
break
if self.stream.current.type == "mul":
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == "pow":
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
if (
self.stream.current.type == "name"
and self.stream.look().type == "assign"
):
# Parsing a kwarg
ensure(dyn_kwargs is None)
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
else:
# Parsing an arg
ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect("rparen")
return args, kwargs, dyn_args, dyn_kwargs
def parse_call(self, node: nodes.Expr) -> nodes.Call:
# The lparen will be expected in parse_call_args, but the lineno
# needs to be recorded before the stream is advanced.
token = self.stream.current
args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
def parse_filter(
self, node: t.Optional[nodes.Expr], start_inline: bool = False
) -> t.Optional[nodes.Expr]:
while self.stream.current.type == "pipe" or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect("name")
name = token.value
while self.stream.current.type == "dot":
next(self.stream)
name += "." + self.stream.expect("name").value
if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(
node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
)
start_inline = False
return node
def parse_test(self, node: nodes.Expr) -> nodes.Expr:
token = next(self.stream)
if self.stream.current.test("name:not"):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect("name").value
while self.stream.current.type == "dot":
next(self.stream)
name += "." + self.stream.expect("name").value
dyn_args = dyn_kwargs = None
kwargs: t.List[nodes.Keyword] = []
if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
elif self.stream.current.type in {
"name",
"string",
"integer",
"float",
"lparen",
"lbracket",
"lbrace",
} and not self.stream.current.test_any("name:else", "name:or", "name:and"):
if self.stream.current.test("name:is"):
self.fail("You cannot chain multiple tests with is")
arg_node = self.parse_primary()
arg_node = self.parse_postfix(arg_node)
args = [arg_node]
else:
args = []
node = nodes.Test(
node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(
self, end_tokens: t.Optional[t.Tuple[str, ...]] = None
) -> t.List[nodes.Node]:
body: t.List[nodes.Node] = []
data_buffer: t.List[nodes.Node] = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data() -> None:
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == "data":
if token.value:
add_data(nodes.TemplateData(token.value, lineno=token.lineno))
next(self.stream)
elif token.type == "variable_begin":
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect("variable_end")
elif token.type == "block_begin":
flush_data()
next(self.stream)
if end_tokens is not None and self.stream.current.test_any(
*end_tokens
):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect("block_end")
else:
raise AssertionError("internal parsing error")
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self) -> nodes.Template:
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
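# A short usage sketch (illustrative, not part of this module): the parser is
# normally driven through the public Environment API, which constructs a
# Parser internally, as __init__ above suggests via environment._tokenize.
#
#     from jinja2 import Environment
#
#     ast = Environment().parse("Hello {{ name }}!")
#     # -> Template(body=[Output(nodes=[TemplateData(data='Hello '),
#     #                                 Name(name='name', ctx='load'),
#     #                                 TemplateData(data='!')])])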
| 39,896 | 37.288868 | 88 | py |
jinja | jinja-main/src/jinja2/nodes.py | """AST nodes generated by the parser for the compiler. Also provides
some node tree helper functions used by the parser and compiler in order
to normalize nodes.
"""
import inspect
import operator
import typing as t
from collections import deque
from markupsafe import Markup
from .utils import _PassArg
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
_NodeBound = t.TypeVar("_NodeBound", bound="Node")
_binop_to_func: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"**": operator.pow,
"%": operator.mod,
"+": operator.add,
"-": operator.sub,
}
_uaop_to_func: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
"not": operator.not_,
"+": operator.pos,
"-": operator.neg,
}
_cmpop_to_func: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"eq": operator.eq,
"ne": operator.ne,
"gt": operator.gt,
"gteq": operator.ge,
"lt": operator.lt,
"lteq": operator.le,
"in": lambda a, b: a in b,
"notin": lambda a, b: a not in b,
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(mcs, name, bases, d): # type: ignore
for attr in "fields", "attributes":
storage: t.List[t.Any] = []
storage.extend(getattr(bases[0] if bases else object, attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) <= 1, "multiple inheritance not allowed"
assert len(storage) == len(set(storage)), "layout conflict"
d[attr] = tuple(storage)
d.setdefault("abstract", False)
return type.__new__(mcs, name, bases, d)
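# A small sketch (illustrative) of the field inheritance performed above: a
# subclass's `fields` are appended to its parent's, so
#
#     class MyConst(Literal):
#         fields = ("value",)
#
# yields MyConst.fields == ("value",) plus the attributes ("lineno",
# "environment") inherited from Node, exactly like the built-in Const below.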
class EvalContext:
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(
self, environment: "Environment", template_name: t.Optional[str] = None
) -> None:
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self) -> t.Mapping[str, t.Any]:
return self.__dict__.copy()
def revert(self, old: t.Mapping[str, t.Any]) -> None:
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node: "Node", ctx: t.Optional[EvalContext]) -> EvalContext:
if ctx is None:
if node.environment is None:
raise RuntimeError(
"if no eval context is passed, the node must have an"
" attached environment."
)
return EvalContext(node.environment)
return ctx
class Node(metaclass=NodeType):
"""Baseclass for all Jinja nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields: t.Tuple[str, ...] = ()
attributes: t.Tuple[str, ...] = ("lineno", "environment")
abstract = True
lineno: int
environment: t.Optional["Environment"]
def __init__(self, *fields: t.Any, **attributes: t.Any) -> None:
if self.abstract:
raise TypeError("abstract nodes are not instantiable")
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError(f"{type(self).__name__!r} takes 0 arguments")
raise TypeError(
f"{type(self).__name__!r} takes 0 or {len(self.fields)}"
f" argument{'s' if len(self.fields) != 1 else ''}"
)
for name, arg in zip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError(f"unknown attribute {next(iter(attributes))!r}")
def iter_fields(
self,
exclude: t.Optional[t.Container[str]] = None,
only: t.Optional[t.Container[str]] = None,
) -> t.Iterator[t.Tuple[str, t.Any]]:
"""This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples. By default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (
(exclude is None and only is None)
or (exclude is not None and name not in exclude)
or (only is not None and name in only)
):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(
self,
exclude: t.Optional[t.Container[str]] = None,
only: t.Optional[t.Container[str]] = None,
) -> t.Iterator["Node"]:
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for _, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type: t.Type[_NodeBound]) -> t.Optional[_NodeBound]:
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
return None
def find_all(
self, node_type: t.Union[t.Type[_NodeBound], t.Tuple[t.Type[_NodeBound], ...]]
) -> t.Iterator[_NodeBound]:
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child # type: ignore
yield from child.find_all(node_type)
def set_ctx(self, ctx: str) -> "Node":
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if "ctx" in node.fields:
node.ctx = ctx # type: ignore
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno: int, override: bool = False) -> "Node":
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if "lineno" in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment: "Environment") -> "Node":
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other: t.Any) -> bool:
if type(self) is not type(other):
return NotImplemented
return tuple(self.iter_fields()) == tuple(other.iter_fields())
__hash__ = object.__hash__
def __repr__(self) -> str:
args_str = ", ".join(f"{a}={getattr(self, a, None)!r}" for a in self.fields)
return f"{type(self).__name__}({args_str})"
def dump(self) -> str:
def _dump(node: t.Union[Node, t.Any]) -> None:
if not isinstance(node, Node):
buf.append(repr(node))
return
buf.append(f"nodes.{type(node).__name__}(")
if not node.fields:
buf.append(")")
return
for idx, field in enumerate(node.fields):
if idx:
buf.append(", ")
value = getattr(node, field)
if isinstance(value, list):
buf.append("[")
for idx, item in enumerate(value):
if idx:
buf.append(", ")
_dump(item)
buf.append("]")
else:
_dump(value)
buf.append(")")
buf: t.List[str] = []
_dump(self)
return "".join(buf)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ("body",)
body: t.List[Node]
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ("nodes",)
nodes: t.List["Expr"]
class Extends(Stmt):
"""Represents an extends statement."""
fields = ("template",)
template: "Expr"
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ("target", "iter", "body", "else_", "test", "recursive")
target: Node
iter: Node
body: t.List[Node]
else_: t.List[Node]
test: t.Optional[Node]
recursive: bool
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ("test", "body", "elif_", "else_")
test: Node
body: t.List[Node]
elif_: t.List["If"]
else_: t.List[Node]
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ("name", "args", "defaults", "body")
name: str
args: t.List["Name"]
defaults: t.List["Expr"]
body: t.List[Node]
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ("call", "args", "defaults", "body")
call: "Call"
args: t.List["Name"]
defaults: t.List["Expr"]
body: t.List[Node]
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ("body", "filter")
body: t.List[Node]
filter: "Filter"
class With(Stmt):
"""Specific node for with statements. In older versions of Jinja the
    with statement was implemented on the basis of the `Scope` node instead.
.. versionadded:: 2.9.3
"""
fields = ("targets", "values", "body")
targets: t.List["Expr"]
values: t.List["Expr"]
body: t.List[Node]
class Block(Stmt):
"""A node that represents a block.
.. versionchanged:: 3.0.0
the `required` field was added.
"""
fields = ("name", "body", "scoped", "required")
name: str
body: t.List[Node]
scoped: bool
required: bool
class Include(Stmt):
"""A node that represents the include tag."""
fields = ("template", "with_context", "ignore_missing")
template: "Expr"
with_context: bool
ignore_missing: bool
class Import(Stmt):
"""A node that represents the import tag."""
fields = ("template", "target", "with_context")
template: "Expr"
target: str
with_context: bool
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ("template", "names", "with_context")
template: "Expr"
names: t.List[t.Union[str, t.Tuple[str, str]]]
with_context: bool
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ("node",)
node: Node
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ("target", "node")
target: "Expr"
node: Node
class AssignBlock(Stmt):
"""Assigns a block to a target."""
fields = ("target", "filter", "body")
target: "Expr"
filter: t.Optional["Filter"]
body: t.List[Node]
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self) -> bool:
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ("left", "right")
left: Expr
right: Expr
operator: str
abstract = True
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if (
eval_ctx.environment.sandboxed
and self.operator in eval_ctx.environment.intercepted_binops # type: ignore
):
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ("node",)
node: Expr
operator: str
abstract = True
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if (
eval_ctx.environment.sandboxed
and self.operator in eval_ctx.environment.intercepted_unops # type: ignore
):
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ("name", "ctx")
name: str
ctx: str
def can_assign(self) -> bool:
return self.name not in {"true", "false", "none", "True", "False", "None"}
class NSRef(Expr):
"""Reference to a namespace value assignment"""
fields = ("name", "attr")
name: str
attr: str
def can_assign(self) -> bool:
# We don't need any special checks here; NSRef assignments have a
# runtime check to ensure the target is a namespace object which will
# have been checked already as it is created using a normal assignment
# which goes through a `Name` node.
return True
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) can be
    stored this way.
"""
fields = ("value",)
value: t.Any
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
return self.value
@classmethod
def from_untrusted(
cls,
value: t.Any,
lineno: t.Optional[int] = None,
environment: "t.Optional[Environment]" = None,
) -> "Const":
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
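# Illustrative sketch, not part of the original module: ``from_untrusted``
# accepts only values whose ``repr`` round-trips; everything else raises
# ``Impossible``. ``_demo_const_from_untrusted`` is a hypothetical helper.
def _demo_const_from_untrusted() -> None:
    assert Const.from_untrusted([1, 2]).as_const() == [1, 2]
    try:
        Const.from_untrusted(object())  # repr() is not round-trippable
    except Impossible:
        pass  # expected: no safe constant representation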
class TemplateData(Literal):
"""A constant template string."""
fields = ("data",)
data: str
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ("items", "ctx")
items: t.List[Expr]
ctx: str
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Tuple[t.Any, ...]:
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self) -> bool:
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ("items",)
items: t.List[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.List[t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ("items",)
items: t.List["Pair"]
def as_const(
self, eval_ctx: t.Optional[EvalContext] = None
) -> t.Dict[t.Any, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ("key", "value")
key: Expr
value: Expr
def as_const(
self, eval_ctx: t.Optional[EvalContext] = None
) -> t.Tuple[t.Any, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ("key", "value")
key: str
value: Expr
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Tuple[str, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ("test", "expr1", "expr2")
test: Expr
expr1: Expr
expr2: t.Optional[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
def args_as_const(
node: t.Union["_FilterTestCommon", "Call"], eval_ctx: t.Optional[EvalContext]
) -> t.Tuple[t.List[t.Any], t.Dict[t.Any, t.Any]]:
args = [x.as_const(eval_ctx) for x in node.args]
kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
if node.dyn_args is not None:
try:
args.extend(node.dyn_args.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
if node.dyn_kwargs is not None:
try:
kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
except Exception as e:
raise Impossible() from e
return args, kwargs
class _FilterTestCommon(Expr):
fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
node: Expr
name: str
args: t.List[Expr]
kwargs: t.List[Pair]
dyn_args: t.Optional[Expr]
dyn_kwargs: t.Optional[Expr]
abstract = True
_is_filter = True
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if self._is_filter:
env_map = eval_ctx.environment.filters
else:
env_map = eval_ctx.environment.tests
func = env_map.get(self.name)
pass_arg = _PassArg.from_obj(func) # type: ignore
if func is None or pass_arg is _PassArg.context:
raise Impossible()
if eval_ctx.environment.is_async and (
getattr(func, "jinja_async_variant", False) is True
or inspect.iscoroutinefunction(func)
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
if pass_arg is _PassArg.eval_context:
args.insert(0, eval_ctx)
elif pass_arg is _PassArg.environment:
args.insert(0, eval_ctx.environment)
try:
return func(*args, **kwargs)
except Exception as e:
raise Impossible() from e
class Filter(_FilterTestCommon):
"""Apply a filter to an expression. ``name`` is the name of the
filter, the other fields are the same as :class:`Call`.
If ``node`` is ``None``, the filter is being used in a filter block
and is applied to the content of the block.
"""
node: t.Optional[Expr] # type: ignore
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
if self.node is None:
raise Impossible()
return super().as_const(eval_ctx=eval_ctx)
class Test(_FilterTestCommon):
"""Apply a test to an expression. ``name`` is the name of the test,
    the other fields are the same as :class:`Call`.
.. versionchanged:: 3.0
``as_const`` shares the same logic for filters and tests. Tests
check for volatile, async, and ``@pass_context`` etc.
decorators.
"""
_is_filter = False
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
node: Expr
args: t.List[Expr]
kwargs: t.List[Keyword]
dyn_args: t.Optional[Expr]
dyn_kwargs: t.Optional[Expr]
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ("node", "arg", "ctx")
node: Expr
arg: Expr
ctx: str
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
if self.ctx != "load":
raise Impossible()
eval_ctx = get_eval_context(self, eval_ctx)
try:
return eval_ctx.environment.getitem(
self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
)
except Exception as e:
raise Impossible() from e
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ("node", "attr", "ctx")
node: Expr
attr: str
ctx: str
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
if self.ctx != "load":
raise Impossible()
eval_ctx = get_eval_context(self, eval_ctx)
try:
return eval_ctx.environment.getattr(self.node.as_const(eval_ctx), self.attr)
except Exception as e:
raise Impossible() from e
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ("start", "stop", "step")
start: t.Optional[Expr]
stop: t.Optional[Expr]
step: t.Optional[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> slice:
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj: t.Optional[Expr]) -> t.Optional[t.Any]:
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting
them to strings.
"""
fields = ("nodes",)
nodes: t.List[Expr]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
eval_ctx = get_eval_context(self, eval_ctx)
return "".join(str(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
fields = ("expr", "ops")
expr: Expr
ops: t.List["Operand"]
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
if not result:
return False
value = new_value
except Exception as e:
raise Impossible() from e
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ("op", "expr")
op: str
expr: Expr
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = "*"
class Div(BinExpr):
"""Divides the left by the right node."""
operator = "/"
class FloorDiv(BinExpr):
"""Divides the left by the right node and converts the
result into an integer by truncating.
"""
operator = "//"
class Add(BinExpr):
"""Add the left to the right node."""
operator = "+"
class Sub(BinExpr):
"""Subtract the right from the left node."""
operator = "-"
class Mod(BinExpr):
"""Left modulo right."""
operator = "%"
class Pow(BinExpr):
"""Left to the power of right."""
operator = "**"
class And(BinExpr):
"""Short circuited AND."""
operator = "and"
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = "or"
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = "not"
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = "-"
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = "+"
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ("name",)
name: str
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ("identifier", "name")
identifier: str
name: str
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ("importname",)
importname: str
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
template and is not treated specially by the compiler.
"""
fields = ("name",)
name: str
def __init__(self) -> None:
raise TypeError(
"Can't create internal names. Use the "
"`free_identifier` method on a parser."
)
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ("expr",)
expr: Expr
def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> Markup:
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ("expr",)
expr: Expr
def as_const(
self, eval_ctx: t.Optional[EvalContext] = None
) -> t.Union[Markup, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
    Here is an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
This is basically equivalent to using the
:func:`~jinja2.pass_context` decorator when using the high-level
API, which causes a reference to the context to be passed as the
first argument to a function.
"""
class DerivedContextReference(Expr):
"""Return the current template context including locals. Behaves
exactly like :class:`ContextReference`, but includes local
variables, such as from a ``for`` loop.
.. versionadded:: 2.11
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ("body",)
body: t.List[Node]
class OverlayScope(Stmt):
"""An overlay scope for extensions. This is a largely unoptimized scope
    that can nevertheless be used to introduce completely arbitrary variables
    into a sub scope from a dictionary or dictionary-like object. The `context`
field has to evaluate to a dictionary object.
Example usage::
OverlayScope(context=self.call_method('get_context'),
body=[...])
.. versionadded:: 2.10
"""
fields = ("context", "body")
context: Expr
body: t.List[Node]
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ("options",)
options: t.List[Keyword]
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ("body",)
body: t.List[Node]
# make sure nobody creates custom nodes
def _failing_new(*args: t.Any, **kwargs: t.Any) -> "te.NoReturn":
raise TypeError("can't create custom node types")
NodeType.__new__ = staticmethod(_failing_new) # type: ignore
del _failing_new
| 34,565 | 27.685477 | 88 | py |
jinja | jinja-main/src/jinja2/loaders.py | """API and implementations for loading templates from different data
sources.
"""
import importlib.util
import os
import posixpath
import sys
import typing as t
import weakref
import zipimport
from collections import abc
from hashlib import sha1
from importlib import import_module
from types import ModuleType
from .exceptions import TemplateNotFound
from .utils import internalcode
if t.TYPE_CHECKING:
from .environment import Environment
from .environment import Template
def split_template_path(template: str) -> t.List[str]:
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split("/"):
if (
os.sep in piece
or (os.path.altsep and os.path.altsep in piece)
or piece == os.path.pardir
):
raise TemplateNotFound(template)
elif piece and piece != ".":
pieces.append(piece)
return pieces
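# Illustrative sketch, not part of the original module: the sanity check
# keeps template names from escaping the search path.
# ``_demo_split_template_path`` is a hypothetical helper.
def _demo_split_template_path() -> None:
    assert split_template_path("pages/about.html") == ["pages", "about.html"]
    try:
        split_template_path("../secret.txt")  # ".." is rejected
    except TemplateNotFound:
        pass  # expected: traversal attempts raise TemplateNotFound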
class BaseLoader:
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
    A very basic example of a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with open(path) as f:
source = f.read()
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a string. The filename should be the name of the
file on the filesystem if it was loaded from there, otherwise
``None``. The filename is used by Python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError(
f"{type(self).__name__} cannot provide access to the source"
)
raise TemplateNotFound(template)
def list_templates(self) -> t.List[str]:
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError("this loader cannot iterate over all templates")
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(
environment, code, globals, uptodate
)
class FileSystemLoader(BaseLoader):
"""Load templates from a directory in the file system.
The path can be relative or absolute. Relative paths are relative to
the current working directory.
.. code-block:: python
loader = FileSystemLoader("templates")
A list of paths can be given. The directories will be searched in
order, stopping at the first matching template.
.. code-block:: python
loader = FileSystemLoader(["/override/templates", "/default/templates"])
:param searchpath: A path, or list of paths, to the directory that
contains the templates.
:param encoding: Use this encoding to read the text from template
files.
:param followlinks: Follow symbolic links in the path.
.. versionchanged:: 2.8
Added the ``followlinks`` parameter.
"""
def __init__(
self,
searchpath: t.Union[
str, "os.PathLike[str]", t.Sequence[t.Union[str, "os.PathLike[str]"]]
],
encoding: str = "utf-8",
followlinks: bool = False,
) -> None:
if not isinstance(searchpath, abc.Iterable) or isinstance(searchpath, str):
searchpath = [searchpath]
self.searchpath = [os.fspath(p) for p in searchpath]
self.encoding = encoding
self.followlinks = followlinks
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, str, t.Callable[[], bool]]:
pieces = split_template_path(template)
for searchpath in self.searchpath:
# Use posixpath even on Windows to avoid "drive:" or UNC
# segments breaking out of the search directory.
filename = posixpath.join(searchpath, *pieces)
if os.path.isfile(filename):
break
else:
raise TemplateNotFound(template)
with open(filename, encoding=self.encoding) as f:
contents = f.read()
mtime = os.path.getmtime(filename)
def uptodate() -> bool:
try:
return os.path.getmtime(filename) == mtime
except OSError:
return False
# Use normpath to convert Windows altsep to sep.
return contents, os.path.normpath(filename), uptodate
def list_templates(self) -> t.List[str]:
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, _, filenames in walk_dir:
for filename in filenames:
template = (
os.path.join(dirpath, filename)[len(searchpath) :]
.strip(os.sep)
.replace(os.sep, "/")
)
if template[:2] == "./":
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
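# Illustrative sketch, not part of the original module: typical
# FileSystemLoader usage. The "templates" directory and "index.html"
# file are hypothetical.
def _demo_filesystem_loader() -> None:
    from jinja2 import Environment

    env = Environment(loader=FileSystemLoader("templates"))
    print(env.get_template("index.html").render(title="Home"))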
class PackageLoader(BaseLoader):
"""Load templates from a directory in a Python package.
:param package_name: Import name of the package that contains the
template directory.
:param package_path: Directory within the imported package that
contains the templates.
:param encoding: Encoding of template files.
The following example looks up templates in the ``pages`` directory
within the ``project.ui`` package.
.. code-block:: python
loader = PackageLoader("project.ui", "pages")
Only packages installed as directories (standard pip behavior) or
zip/egg files (less common) are supported. The Python API for
introspecting data in packages is too limited to support other
installation methods the way this loader requires.
There is limited support for :pep:`420` namespace packages. The
template directory is assumed to only be in one namespace
contributor. Zip files contributing to a namespace are not
supported.
.. versionchanged:: 3.0
No longer uses ``setuptools`` as a dependency.
.. versionchanged:: 3.0
Limited PEP 420 namespace package support.
"""
def __init__(
self,
package_name: str,
package_path: "str" = "templates",
encoding: str = "utf-8",
) -> None:
package_path = os.path.normpath(package_path).rstrip(os.sep)
# normpath preserves ".", which isn't valid in zip paths.
if package_path == os.path.curdir:
package_path = ""
elif package_path[:2] == os.path.curdir + os.sep:
package_path = package_path[2:]
self.package_path = package_path
self.package_name = package_name
self.encoding = encoding
# Make sure the package exists. This also makes namespace
# packages work, otherwise get_loader returns None.
import_module(package_name)
spec = importlib.util.find_spec(package_name)
assert spec is not None, "An import spec was not found for the package."
loader = spec.loader
assert loader is not None, "A loader was not found for the package."
self._loader = loader
self._archive = None
template_root = None
if isinstance(loader, zipimport.zipimporter):
self._archive = loader.archive
pkgdir = next(iter(spec.submodule_search_locations)) # type: ignore
template_root = os.path.join(pkgdir, package_path).rstrip(os.sep)
else:
roots: t.List[str] = []
# One element for regular packages, multiple for namespace
# packages, or None for single module file.
if spec.submodule_search_locations:
roots.extend(spec.submodule_search_locations)
# A single module file, use the parent directory instead.
elif spec.origin is not None:
roots.append(os.path.dirname(spec.origin))
for root in roots:
root = os.path.join(root, package_path)
if os.path.isdir(root):
template_root = root
break
if template_root is None:
raise ValueError(
f"The {package_name!r} package was not installed in a"
" way that PackageLoader understands."
)
self._template_root = template_root
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, str, t.Optional[t.Callable[[], bool]]]:
# Use posixpath even on Windows to avoid "drive:" or UNC
# segments breaking out of the search directory. Use normpath to
# convert Windows altsep to sep.
p = os.path.normpath(
posixpath.join(self._template_root, *split_template_path(template))
)
up_to_date: t.Optional[t.Callable[[], bool]]
if self._archive is None:
# Package is a directory.
if not os.path.isfile(p):
raise TemplateNotFound(template)
with open(p, "rb") as f:
source = f.read()
mtime = os.path.getmtime(p)
def up_to_date() -> bool:
return os.path.isfile(p) and os.path.getmtime(p) == mtime
else:
# Package is a zip file.
try:
source = self._loader.get_data(p) # type: ignore
except OSError as e:
raise TemplateNotFound(template) from e
# Could use the zip's mtime for all template mtimes, but
# would need to safely reload the module if it's out of
# date, so just report it as always current.
up_to_date = None
return source.decode(self.encoding), p, up_to_date
def list_templates(self) -> t.List[str]:
results: t.List[str] = []
if self._archive is None:
# Package is a directory.
offset = len(self._template_root)
for dirpath, _, filenames in os.walk(self._template_root):
dirpath = dirpath[offset:].lstrip(os.sep)
results.extend(
os.path.join(dirpath, name).replace(os.sep, "/")
for name in filenames
)
else:
if not hasattr(self._loader, "_files"):
raise TypeError(
"This zip import does not have the required"
" metadata to list templates."
)
# Package is a zip file.
prefix = self._template_root[len(self._archive) :].lstrip(os.sep) + os.sep
offset = len(prefix)
for name in self._loader._files.keys():
# Find names under the templates directory that aren't directories.
if name.startswith(prefix) and name[-1] != os.sep:
results.append(name[offset:].replace(os.sep, "/"))
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a Python dict mapping template names to
template source. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
    Because auto reloading is rarely useful, this is disabled by default.
"""
def __init__(self, mapping: t.Mapping[str, str]) -> None:
self.mapping = mapping
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, None, t.Callable[[], bool]]:
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self) -> t.List[str]:
return sorted(self.mapping)
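# Illustrative sketch, not part of the original module: DictLoader keeps
# templates in memory, which makes it convenient in unit tests.
def _demo_dict_loader() -> None:
    from jinja2 import Environment

    env = Environment(loader=DictLoader({"hello.txt": "Hello {{ name }}!"}))
    assert env.get_template("hello.txt").render(name="World") == "Hello World!"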
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
a string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(
self,
load_func: t.Callable[
[str],
t.Optional[
t.Union[
str, t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]
]
],
],
) -> None:
self.load_func = load_func
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
if isinstance(rv, str):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
    to a prefix. By default the prefix is delimited from the template name
    by a slash, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
    Loading ``'app1/index.html'`` returns the file from the app1 package,
    while loading ``'app2/index.html'`` returns the file from the second.
"""
def __init__(
self, mapping: t.Mapping[str, BaseLoader], delimiter: str = "/"
) -> None:
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template: str) -> t.Tuple[BaseLoader, str]:
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError) as e:
raise TemplateNotFound(template) from e
return loader, name
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound as e:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template) from e
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound as e:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name) from e
def list_templates(self) -> t.List[str]:
result = []
for prefix, loader in self.mapping.items():
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
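# Illustrative sketch, not part of the original module: the prefix picks
# the delegate loader; the rest of the name is passed through. The app
# names here are hypothetical.
def _demo_prefix_loader() -> None:
    from jinja2 import Environment

    loader = PrefixLoader(
        {
            "app1": DictLoader({"index.html": "app1 index"}),
            "app2": DictLoader({"index.html": "app2 index"}),
        }
    )
    env = Environment(loader=loader)
    assert env.get_template("app2/index.html").render() == "app2 index"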
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders: t.Sequence[BaseLoader]) -> None:
self.loaders = loaders
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self) -> t.List[str]:
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
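# Illustrative sketch, not part of the original module: the first loader
# that finds the name wins, so user templates can shadow built-in ones.
def _demo_choice_loader() -> None:
    from jinja2 import Environment

    loader = ChoiceLoader(
        [
            DictLoader({"page.html": "user override"}),
            DictLoader({"page.html": "built-in", "base.html": "base"}),
        ]
    )
    env = Environment(loader=loader)
    assert env.get_template("page.html").render() == "user override"
    assert env.get_template("base.html").render() == "base"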
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(
self,
path: t.Union[
str, "os.PathLike[str]", t.Sequence[t.Union[str, "os.PathLike[str]"]]
],
) -> None:
package_name = f"_jinja2_module_templates_{id(self):x}"
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if not isinstance(path, abc.Iterable) or isinstance(path, str):
path = [path]
mod.__path__ = [os.fspath(p) for p in path]
sys.modules[package_name] = weakref.proxy(
mod, lambda x: sys.modules.pop(package_name, None)
)
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name: str) -> str:
return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
@staticmethod
def get_module_filename(name: str) -> str:
return ModuleLoader.get_template_key(name) + ".py"
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
key = self.get_template_key(name)
module = f"{self.package_name}.{key}"
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ["root"])
except ImportError as e:
raise TemplateNotFound(name) from e
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
if globals is None:
globals = {}
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals
)
| 23,058 | 33.72741 | 88 | py |
jinja | jinja-main/src/jinja2/async_utils.py | import inspect
import typing as t
from functools import WRAPPER_ASSIGNMENTS
from functools import wraps
from .utils import _PassArg
from .utils import pass_eval_context
V = t.TypeVar("V")
def async_variant(normal_func): # type: ignore
def decorator(async_func): # type: ignore
pass_arg = _PassArg.from_obj(normal_func)
need_eval_context = pass_arg is None
if pass_arg is _PassArg.environment:
def is_async(args: t.Any) -> bool:
return t.cast(bool, args[0].is_async)
else:
def is_async(args: t.Any) -> bool:
return t.cast(bool, args[0].environment.is_async)
# Take the doc and annotations from the sync function, but the
# name from the async function. Pallets-Sphinx-Themes
# build_function_directive expects __wrapped__ to point to the
# sync function.
async_func_attrs = ("__module__", "__name__", "__qualname__")
normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs))
@wraps(normal_func, assigned=normal_func_attrs)
@wraps(async_func, assigned=async_func_attrs, updated=())
def wrapper(*args, **kwargs): # type: ignore
b = is_async(args)
if need_eval_context:
args = args[1:]
if b:
return async_func(*args, **kwargs)
return normal_func(*args, **kwargs)
if need_eval_context:
wrapper = pass_eval_context(wrapper)
wrapper.jinja_async_variant = True # type: ignore[attr-defined]
return wrapper
return decorator
_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}
async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
# Avoid a costly call to isawaitable
if type(value) in _common_primitives:
return t.cast("V", value)
if inspect.isawaitable(value):
return await t.cast("t.Awaitable[V]", value)
return t.cast("V", value)
async def auto_aiter(
iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
if hasattr(iterable, "__aiter__"):
async for item in t.cast("t.AsyncIterable[V]", iterable):
yield item
else:
for item in iterable:
yield item
async def auto_to_list(
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> t.List["V"]:
return [x async for x in auto_aiter(value)]
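# Illustrative sketch, not part of the original module: the auto_* helpers
# accept sync and async iterables alike, which is how the async runtime
# treats both uniformly. ``_demo_auto_aiter`` is a hypothetical helper.
def _demo_auto_aiter() -> None:
    import asyncio

    async def agen() -> t.AsyncIterator[int]:
        yield 1
        yield 2

    assert asyncio.run(auto_to_list([1, 2])) == [1, 2]  # plain iterable
    assert asyncio.run(auto_to_list(agen())) == [1, 2]  # async iterable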
| 2,477 | 28.152941 | 88 | py |
jinja | jinja-main/src/jinja2/tests.py | """Built-in template tests used with the ``is`` operator."""
import operator
import typing as t
from collections import abc
from numbers import Number
from .runtime import Undefined
from .utils import pass_environment
if t.TYPE_CHECKING:
from .environment import Environment
def test_odd(value: int) -> bool:
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value: int) -> bool:
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value: int, num: int) -> bool:
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value: t.Any) -> bool:
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value: t.Any) -> bool:
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
@pass_environment
def test_filter(env: "Environment", value: str) -> bool:
"""Check if a filter exists by name. Useful if a filter may be
optionally available.
.. code-block:: jinja
{% if 'markdown' is filter %}
{{ value | markdown }}
{% else %}
{{ value }}
{% endif %}
.. versionadded:: 3.0
"""
return value in env.filters
@pass_environment
def test_test(env: "Environment", value: str) -> bool:
"""Check if a test exists by name. Useful if a test may be
optionally available.
.. code-block:: jinja
{% if 'loud' is test %}
{% if value is loud %}
{{ value|upper }}
{% else %}
{{ value|lower }}
{% endif %}
{% else %}
{{ value }}
{% endif %}
.. versionadded:: 3.0
"""
return value in env.tests
def test_none(value: t.Any) -> bool:
"""Return true if the variable is none."""
return value is None
def test_boolean(value: t.Any) -> bool:
"""Return true if the object is a boolean value.
.. versionadded:: 2.11
"""
return value is True or value is False
def test_false(value: t.Any) -> bool:
"""Return true if the object is False.
.. versionadded:: 2.11
"""
return value is False
def test_true(value: t.Any) -> bool:
"""Return true if the object is True.
.. versionadded:: 2.11
"""
return value is True
# NOTE: The existing 'number' test matches booleans and floats
def test_integer(value: t.Any) -> bool:
"""Return true if the object is an integer.
.. versionadded:: 2.11
"""
return isinstance(value, int) and value is not True and value is not False
# NOTE: The existing 'number' test matches booleans and integers
def test_float(value: t.Any) -> bool:
"""Return true if the object is a float.
.. versionadded:: 2.11
"""
return isinstance(value, float)
def test_lower(value: str) -> bool:
"""Return true if the variable is lowercased."""
return str(value).islower()
def test_upper(value: str) -> bool:
"""Return true if the variable is uppercased."""
return str(value).isupper()
def test_string(value: t.Any) -> bool:
"""Return true if the object is a string."""
return isinstance(value, str)
def test_mapping(value: t.Any) -> bool:
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, abc.Mapping)
def test_number(value: t.Any) -> bool:
"""Return true if the variable is a number."""
return isinstance(value, Number)
def test_sequence(value: t.Any) -> bool:
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except Exception:
return False
return True
def test_sameas(value: t.Any, other: t.Any) -> bool:
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value: t.Any) -> bool:
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value: t.Any) -> bool:
"""Check if the value is escaped."""
return hasattr(value, "__html__")
def test_in(value: t.Any, seq: t.Container[t.Any]) -> bool:
"""Check if value is in seq.
.. versionadded:: 2.10
"""
return value in seq
TESTS = {
"odd": test_odd,
"even": test_even,
"divisibleby": test_divisibleby,
"defined": test_defined,
"undefined": test_undefined,
"filter": test_filter,
"test": test_test,
"none": test_none,
"boolean": test_boolean,
"false": test_false,
"true": test_true,
"integer": test_integer,
"float": test_float,
"lower": test_lower,
"upper": test_upper,
"string": test_string,
"mapping": test_mapping,
"number": test_number,
"sequence": test_sequence,
"iterable": test_iterable,
"callable": callable,
"sameas": test_sameas,
"escaped": test_escaped,
"in": test_in,
"==": operator.eq,
"eq": operator.eq,
"equalto": operator.eq,
"!=": operator.ne,
"ne": operator.ne,
">": operator.gt,
"gt": operator.gt,
"greaterthan": operator.gt,
"ge": operator.ge,
">=": operator.ge,
"<": operator.lt,
"lt": operator.lt,
"lessthan": operator.lt,
"<=": operator.le,
"le": operator.le,
}
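# Illustrative sketch, not part of the original module: tests are applied
# with the ``is`` operator. Assumes the installed ``jinja2`` package;
# ``_demo_tests`` is a hypothetical helper.
def _demo_tests() -> None:
    from jinja2 import Environment

    env = Environment()
    out = env.from_string("{{ 4 is divisibleby 2 }}|{{ 'x' is string }}").render()
    assert out == "True|True"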
| 5,912 | 22.097656 | 78 | py |
jinja | jinja-main/src/jinja2/ext.py | """Extension API for adding custom tags and behavior."""
import pprint
import re
import typing as t
from markupsafe import Markup
from . import defaults
from . import nodes
from .environment import Environment
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .runtime import concat # type: ignore
from .runtime import Context
from .runtime import Undefined
from .utils import import_string
from .utils import pass_context
if t.TYPE_CHECKING:
import typing_extensions as te
from .lexer import Token
from .lexer import TokenStream
from .parser import Parser
class _TranslationsBasic(te.Protocol):
def gettext(self, message: str) -> str:
...
def ngettext(self, singular: str, plural: str, n: int) -> str:
            ...
class _TranslationsContext(_TranslationsBasic):
def pgettext(self, context: str, message: str) -> str:
...
def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
...
_SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
# I18N functions available in Jinja templates. If the I18N library
# provides ugettext, it will be assigned to gettext.
GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
"_",
"gettext",
"ngettext",
"pgettext",
"npgettext",
)
_ws_re = re.compile(r"\s*\n\s*")
class Extension:
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
    storing the configuration values on the environment. Because the
    environment then ends up acting as central configuration storage, the
    attributes may clash, which is why extensions have to ensure that the
    names they choose for configuration are not too generic. ``prefix``, for
    example, is a terrible name while ``fragment_cache_prefix`` is a good
    name, as it includes the name of the extension (fragment cache).
"""
identifier: t.ClassVar[str]
def __init_subclass__(cls) -> None:
cls.identifier = f"{cls.__module__}.{cls.__name__}"
#: if this extension parses this is the list of tags it's listening to.
tags: t.Set[str] = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment: Environment) -> None:
self.environment = environment
def bind(self, environment: Environment) -> "Extension":
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(
self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
) -> str:
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(
self, stream: "TokenStream"
) -> t.Union["TokenStream", t.Iterable["Token"]]:
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
"""
return stream
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(
self, name: str, lineno: t.Optional[int] = None
) -> nodes.ExtensionAttribute:
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(
self,
name: str,
args: t.Optional[t.List[nodes.Expr]] = None,
kwargs: t.Optional[t.List[nodes.Keyword]] = None,
dyn_args: t.Optional[nodes.Expr] = None,
dyn_kwargs: t.Optional[nodes.Expr] = None,
lineno: t.Optional[int] = None,
) -> nodes.Call:
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(
self.attr(name, lineno=lineno),
args,
kwargs,
dyn_args,
dyn_kwargs,
lineno=lineno,
)
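# Illustrative sketch, not part of the original module: a minimal custom
# extension built from ``parse`` and ``call_method``. The ``shout`` tag
# and the class are hypothetical.
class _DemoShoutExtension(Extension):
    tags = {"shout"}

    def parse(self, parser: "Parser") -> nodes.Output:
        # The stream points at the "shout" name token that matched.
        lineno = next(parser.stream).lineno
        arg = parser.parse_expression()
        call = self.call_method("_shout", [arg], lineno=lineno)
        return nodes.Output([call], lineno=lineno)

    def _shout(self, value: t.Any) -> str:
        return f"{value}".upper() + "!"
# Usage sketch: Environment(extensions=[_DemoShoutExtension]) renders
# '{% shout "hi" %}' as "HI!".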
@pass_context
def _gettext_alias(
__context: Context, *args: t.Any, **kwargs: t.Any
) -> t.Union[t.Any, Undefined]:
return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
@pass_context
def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, even if there are no
# variables. This makes translation strings more consistent
        # and predictable. This requires escaping any literal "%" as "%%"
        # (the trans block parser already does this for extracted strings).
return rv % variables # type: ignore
return gettext
def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
@pass_context
def ngettext(
__context: Context,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return ngettext
def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
@pass_context
def pgettext(
__context: Context, __string_ctx: str, __string: str, **variables: t.Any
) -> str:
variables.setdefault("context", __string_ctx)
rv = __context.call(func, __string_ctx, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return pgettext
def _make_new_npgettext(
func: t.Callable[[str, str, str, int], str]
) -> t.Callable[..., str]:
@pass_context
def npgettext(
__context: Context,
__string_ctx: str,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
variables.setdefault("context", __string_ctx)
variables.setdefault("num", __num)
rv = __context.call(func, __string_ctx, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
# Always treat as a format string, see gettext comment above.
return rv % variables # type: ignore
return npgettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja."""
tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
    # %}{{ count }} foos{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment: Environment) -> None:
super().__init__(environment)
environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False,
)
def _install(
self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
) -> None:
# ugettext and ungettext are preferred in case the I18N library
# is providing compatibility with older Python versions.
gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
pgettext = getattr(translations, "pgettext", None)
npgettext = getattr(translations, "npgettext", None)
self._install_callables(
gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
)
def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
import gettext
translations = gettext.NullTranslations()
if hasattr(translations, "pgettext"):
            # Python >= 3.8
pgettext = translations.pgettext
else:
def pgettext(c: str, s: str) -> str:
return s
if hasattr(translations, "npgettext"):
npgettext = translations.npgettext
else:
def npgettext(c: str, s: str, p: str, n: int) -> str:
return s if n == 1 else p
self._install_callables(
gettext=translations.gettext,
ngettext=translations.ngettext,
newstyle=newstyle,
pgettext=pgettext,
npgettext=npgettext,
)
def _install_callables(
self,
gettext: t.Callable[[str], str],
ngettext: t.Callable[[str, str, int], str],
newstyle: t.Optional[bool] = None,
pgettext: t.Optional[t.Callable[[str, str], str]] = None,
npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
) -> None:
if newstyle is not None:
self.environment.newstyle_gettext = newstyle # type: ignore
if self.environment.newstyle_gettext: # type: ignore
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
if pgettext is not None:
pgettext = _make_new_pgettext(pgettext)
if npgettext is not None:
npgettext = _make_new_npgettext(npgettext)
self.environment.globals.update(
gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
)
def _uninstall(self, translations: "_SupportedTranslations") -> None:
for key in ("gettext", "ngettext", "pgettext", "npgettext"):
self.environment.globals.pop(key, None)
def _extract(
self,
source: t.Union[str, nodes.Template],
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
) -> t.Iterator[
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
if isinstance(source, str):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
context = None
context_token = parser.stream.next_if("string")
if context_token is not None:
context = context_token.value
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
        # a later stage.
plural_expr: t.Optional[nodes.Expr] = None
plural_expr_assignment: t.Optional[nodes.Assign] = None
num_called_num = False
variables: t.Dict[str, nodes.Expr] = {}
trimmed = None
while parser.stream.current.type != "block_end":
if variables:
parser.stream.expect("comma")
# skip colon for python compatibility
if parser.stream.skip_if("colon"):
break
token = parser.stream.expect("name")
if token.value in variables:
parser.fail(
f"translatable variable {token.value!r} defined twice.",
token.lineno,
exc=TemplateAssertionError,
)
# expressions
if parser.stream.current.type == "assign":
next(parser.stream)
variables[token.value] = var = parser.parse_expression()
elif trimmed is None and token.value in ("trimmed", "notrimmed"):
trimmed = token.value == "trimmed"
continue
else:
variables[token.value] = var = nodes.Name(token.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name("_trans", "load")
variables[token.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name("_trans", "store"), var
)
else:
plural_expr = var
num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], "load")
num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
if parser.stream.current.type != "block_end":
token = parser.stream.expect("name")
if token.value not in variables:
parser.fail(
f"unknown variable {token.value!r} for pluralization",
token.lineno,
exc=TemplateAssertionError,
)
plural_expr = variables[token.value]
num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for name in referenced:
if name not in variables:
variables[name] = nodes.Name(name, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail("pluralize without variables", lineno)
if trimmed is None:
trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(
singular,
plural,
context,
variables,
plural_expr,
bool(referenced),
num_called_num and have_plural,
)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
return _ws_re.sub(" ", string.strip())
def _parse_block(
self, parser: "Parser", allow_pluralize: bool
) -> t.Tuple[t.List[str], str]:
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while True:
if parser.stream.current.type == "data":
buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
elif parser.stream.current.type == "variable_begin":
next(parser.stream)
name = parser.stream.expect("name").value
referenced.append(name)
buf.append(f"%({name})s")
parser.stream.expect("variable_end")
elif parser.stream.current.type == "block_begin":
next(parser.stream)
if parser.stream.current.test("name:endtrans"):
break
elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
parser.fail(
"a translatable section can have only one pluralize section"
)
parser.fail(
"control structures in translatable sections are not allowed"
)
elif parser.stream.eos:
parser.fail("unclosed translation block")
else:
raise RuntimeError("internal parser error")
return referenced, concat(buf)
def _make_node(
self,
singular: str,
plural: t.Optional[str],
context: t.Optional[str],
variables: t.Dict[str, nodes.Expr],
plural_expr: t.Optional[nodes.Expr],
vars_referenced: bool,
num_called_num: bool,
) -> nodes.Output:
"""Generates a useful node from the data provided."""
newstyle = self.environment.newstyle_gettext # type: ignore
node: nodes.Expr
        # no variables referenced? then there is no need for the "%%"
        # escaping that old style gettext invocations need when vars exist.
if not vars_referenced and not newstyle:
singular = singular.replace("%%", "%")
if plural:
plural = plural.replace("%%", "%")
func_name = "gettext"
func_args: t.List[nodes.Expr] = [nodes.Const(singular)]
if context is not None:
func_args.insert(0, nodes.Const(context))
func_name = f"p{func_name}"
if plural_expr is not None:
func_name = f"n{func_name}"
func_args.extend((nodes.Const(plural), plural_expr))
node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if newstyle:
for key, value in variables.items():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(
node,
nodes.Dict(
[
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]
),
)
return nodes.Output([node])
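# --- Editor's example (a sketch, not part of the library): minimal use of
# the i18n extension with null translations installed. The template text is
# illustrative; ``install_null_translations`` is added to the environment by
# this extension's constructor.
def _example_i18n_usage() -> str:
    env = Environment(extensions=[InternationalizationExtension])
    env.install_null_translations()  # type: ignore[attr-defined]
    tmpl = env.from_string("{% trans %}Hello {{ name }}!{% endtrans %}")
    return tmpl.render(name="World")  # -> "Hello World!"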
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
tags = {"do"}
def parse(self, parser: "Parser") -> nodes.ExprStmt:
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
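# --- Editor's example (a sketch, not part of the library): the ``do`` tag
# evaluating an expression purely for its side effect. The template text is
# illustrative.
def _example_do_tag() -> str:
    env = Environment(extensions=[ExprStmtExtension])
    tmpl = env.from_string(
        "{% set items = [] %}"
        "{% for n in range(3) %}{% do items.append(n * 2) %}{% endfor %}"
        "{{ items }}"
    )
    return tmpl.render()  # -> "[0, 2, 4]"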
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = {"break", "continue"}
def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
token = next(parser.stream)
if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
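# --- Editor's example (a sketch, not part of the library): ``break`` and
# ``continue`` inside a for loop once this extension is enabled.
def _example_loopcontrols() -> str:
    env = Environment(extensions=[LoopControlExtension])
    tmpl = env.from_string(
        "{% for n in range(10) %}"
        "{% if n == 2 %}{% continue %}{% endif %}"
        "{% if n == 5 %}{% break %}{% endif %}"
        "{{ n }}"
        "{% endfor %}"
    )
    return tmpl.render()  # -> "0134"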
class DebugExtension(Extension):
"""A ``{% debug %}`` tag that dumps the available variables,
filters, and tests.
.. code-block:: html+jinja
<pre>{% debug %}</pre>
.. code-block:: text
{'context': {'cycler': <class 'jinja2.utils.Cycler'>,
...,
'namespace': <class 'jinja2.utils.Namespace'>},
'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
.. versionadded:: 2.11.0
"""
tags = {"debug"}
def parse(self, parser: "Parser") -> nodes.Output:
lineno = parser.stream.expect("name:debug").lineno
context = nodes.ContextReference()
result = self.call_method("_render", [context], lineno=lineno)
return nodes.Output([result], lineno=lineno)
def _render(self, context: Context) -> str:
result = {
"context": context.get_all(),
"filters": sorted(self.environment.filters.keys()),
"tests": sorted(self.environment.tests.keys()),
}
# Set the depth since the intent is to show the top few names.
return pprint.pformat(result, depth=3, compact=True)
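# --- Editor's example (a sketch, not part of the library): rendering the
# ``{% debug %}`` tag. The output is the pprint dump shown in the class
# docstring above.
def _example_debug_tag() -> str:
    env = Environment(extensions=[DebugExtension])
    return env.from_string("<pre>{% debug %}</pre>").render()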
def extract_from_ast(
ast: nodes.Template,
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
babel_style: bool = True,
) -> t.Iterator[
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
"""Extract localizable strings from the given template node. Per
    default this function returns matches in Babel style, meaning non-string
    parameters as well as keyword arguments are returned as `None`. This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior, set the `babel_style` parameter to
    `False`, which causes only strings to be returned and parameters to
    always be stored in tuples. As a consequence, invalid gettext calls
    (calls without a single string parameter, or with string parameters
    after non-string parameters) are skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string, or a tuple of strings for functions
with multiple string arguments.
    This extraction function operates on the AST and is therefore unable
    to extract any comments. For comment support you have to use the babel
    extraction interface or extract comments yourself.
"""
out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]
for node in ast.find_all(nodes.Call):
if (
not isinstance(node.node, nodes.Name)
or node.node.name not in gettext_functions
):
continue
strings: t.List[t.Optional[str]] = []
for arg in node.args:
if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
strings.append(arg.value)
else:
strings.append(None)
for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
out = tuple(x for x in strings if x is not None)
if not out:
continue
else:
if len(strings) == 1:
out = strings[0]
else:
out = tuple(strings)
yield node.lineno, node.node.name, out
class _CommentFinder:
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(
self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
) -> None:
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset: int) -> t.List[str]:
try:
for _, token_type, token_value in reversed(
self.tokens[self.offset : offset]
):
if token_type in ("comment", "linecomment"):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno: int) -> t.List[str]:
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(
fileobj: t.BinaryIO,
keywords: t.Sequence[str],
comment_tags: t.Sequence[str],
options: t.Dict[str, t.Any],
) -> t.Iterator[
t.Tuple[
int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
]
]:
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions: t.Dict[t.Type[Extension], None] = {}
for extension_name in options.get("extensions", "").split(","):
extension_name = extension_name.strip()
if not extension_name:
continue
extensions[import_string(extension_name)] = None
if InternationalizationExtension not in extensions:
extensions[InternationalizationExtension] = None
def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}
silent = getbool(options, "silent", True)
environment = Environment(
options.get("block_start_string", defaults.BLOCK_START_STRING),
options.get("block_end_string", defaults.BLOCK_END_STRING),
options.get("variable_start_string", defaults.VARIABLE_START_STRING),
options.get("variable_end_string", defaults.VARIABLE_END_STRING),
options.get("comment_start_string", defaults.COMMENT_START_STRING),
options.get("comment_end_string", defaults.COMMENT_END_STRING),
options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
defaults.NEWLINE_SEQUENCE,
getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
tuple(extensions),
cache_size=0,
auto_reload=False,
)
if getbool(options, "trimmed"):
environment.policies["ext.i18n.trimmed"] = True
if getbool(options, "newstyle_gettext"):
environment.newstyle_gettext = True # type: ignore
source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
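# --- Editor's example (a sketch, not part of the library): driving
# babel_extract by hand on an in-memory template. Babel normally calls this
# through its extraction configuration; the template text and comment tag
# below are illustrative, and the expected tuple is approximate.
def _example_babel_extract() -> t.List[t.Any]:
    import io

    source = b"{# NOTE: shown on the login page #}\n{{ _('Sign in') }}\n"
    messages = babel_extract(io.BytesIO(source), GETTEXT_FUNCTIONS, ["NOTE:"], {})
    return list(messages)
    # roughly [(2, '_', 'Sign in', ['shown on the login page'])]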
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
debug = DebugExtension
| 31,470 | 35.594186 | 88 | py |
jinja | jinja-main/src/jinja2/environment.py | """Classes for managing templates and their runtime and compile time
options.
"""
import os
import typing
import typing as t
import weakref
from collections import ChainMap
from functools import lru_cache
from functools import partial
from functools import reduce
from types import CodeType
from markupsafe import Markup
from . import nodes
from .compiler import CodeGenerator
from .compiler import generate
from .defaults import BLOCK_END_STRING
from .defaults import BLOCK_START_STRING
from .defaults import COMMENT_END_STRING
from .defaults import COMMENT_START_STRING
from .defaults import DEFAULT_FILTERS
from .defaults import DEFAULT_NAMESPACE
from .defaults import DEFAULT_POLICIES
from .defaults import DEFAULT_TESTS
from .defaults import KEEP_TRAILING_NEWLINE
from .defaults import LINE_COMMENT_PREFIX
from .defaults import LINE_STATEMENT_PREFIX
from .defaults import LSTRIP_BLOCKS
from .defaults import NEWLINE_SEQUENCE
from .defaults import TRIM_BLOCKS
from .defaults import VARIABLE_END_STRING
from .defaults import VARIABLE_START_STRING
from .exceptions import TemplateNotFound
from .exceptions import TemplateRuntimeError
from .exceptions import TemplatesNotFound
from .exceptions import TemplateSyntaxError
from .exceptions import UndefinedError
from .lexer import get_lexer
from .lexer import Lexer
from .lexer import TokenStream
from .nodes import EvalContext
from .parser import Parser
from .runtime import Context
from .runtime import new_context
from .runtime import Undefined
from .utils import _PassArg
from .utils import concat
from .utils import consume
from .utils import import_string
from .utils import internalcode
from .utils import LRUCache
from .utils import missing
if t.TYPE_CHECKING:
import typing_extensions as te
from .bccache import BytecodeCache
from .ext import Extension
from .loaders import BaseLoader
_env_bound = t.TypeVar("_env_bound", bound="Environment")
# for direct template usage we have up to ten living environments
@lru_cache(maxsize=10)
def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound:
"""Return a new spontaneous environment. A spontaneous environment
is used for templates created directly rather than through an
existing environment.
:param cls: Environment class to create.
:param args: Positional arguments passed to environment.
"""
env = cls(*args)
env.shared = True
return env
def create_cache(
size: int,
) -> t.Optional[t.MutableMapping[t.Tuple["weakref.ref[BaseLoader]", str], "Template"]]:
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size) # type: ignore
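# --- Editor's note (a sketch, not part of the library): the three
# ``cache_size`` regimes as mapped by create_cache.
def _example_create_cache() -> None:
    assert create_cache(0) is None  # caching disabled, recompile every time
    assert create_cache(-1) == {}  # unbounded dict cache, never cleaned out
    assert isinstance(create_cache(400), LRUCache)  # LRU eviction beyond 400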
def copy_cache(
cache: t.Optional[
t.MutableMapping[t.Tuple["weakref.ref[BaseLoader]", str], "Template"]
],
) -> t.Optional[t.MutableMapping[t.Tuple["weakref.ref[BaseLoader]", str], "Template"]]:
"""Create an empty copy of the given cache."""
if cache is None:
return None
if type(cache) is dict:
return {}
return LRUCache(cache.capacity) # type: ignore
def load_extensions(
environment: "Environment",
extensions: t.Sequence[t.Union[str, t.Type["Extension"]]],
) -> t.Dict[str, "Extension"]:
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated extensions.
"""
result = {}
for extension in extensions:
if isinstance(extension, str):
extension = t.cast(t.Type["Extension"], import_string(extension))
result[extension.identifier] = extension(environment)
return result
def _environment_config_check(environment: "Environment") -> "Environment":
"""Perform a sanity check on the environment."""
assert issubclass(
environment.undefined, Undefined
), "'undefined' must be a subclass of 'jinja2.Undefined'."
assert (
environment.block_start_string
!= environment.variable_start_string
!= environment.comment_start_string
), "block, variable and comment start strings must be different."
assert environment.newline_sequence in {
"\r",
"\r\n",
"\n",
}, "'newline_sequence' must be one of '\\n', '\\r\\n', or '\\r'."
return environment
class Environment:
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here are the possible initialization parameters:
`block_start_string`
The string marking the beginning of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the beginning of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the beginning of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
        Should the optimizer be enabled? Defaults to ``True``.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
``None`` implicitly into an empty string here.
`autoescape`
If set to ``True`` the XML/HTML autoescaping feature is enabled by
default. For more details about autoescaping see
:class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
        be a callable that is passed the template name and has to
        return ``True`` or ``False`` depending on whether autoescaping
        should be enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``400`` which means
that if more than 400 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
.. versionchanged:: 2.8
The cache size was increased to 400 from a low 50.
`auto_reload`
Some loaders load templates from locations where the template
        sources may change (i.e. file system or database). If
``auto_reload`` is set to ``True`` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
`enable_async`
If set to true this enables async template execution which
allows using async functions and generators.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to: t.Optional["Environment"] = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: the class that is used for code generation. See
#: :class:`~jinja2.compiler.CodeGenerator` for more information.
code_generator_class: t.Type["CodeGenerator"] = CodeGenerator
concat = "".join
#: the context class that is used for templates. See
#: :class:`~jinja2.runtime.Context` for more information.
context_class: t.Type[Context] = Context
template_class: t.Type["Template"]
def __init__(
self,
block_start_string: str = BLOCK_START_STRING,
block_end_string: str = BLOCK_END_STRING,
variable_start_string: str = VARIABLE_START_STRING,
variable_end_string: str = VARIABLE_END_STRING,
comment_start_string: str = COMMENT_START_STRING,
comment_end_string: str = COMMENT_END_STRING,
line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
trim_blocks: bool = TRIM_BLOCKS,
lstrip_blocks: bool = LSTRIP_BLOCKS,
newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
optimized: bool = True,
undefined: t.Type[Undefined] = Undefined,
finalize: t.Optional[t.Callable[..., t.Any]] = None,
autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
loader: t.Optional["BaseLoader"] = None,
cache_size: int = 400,
auto_reload: bool = True,
bytecode_cache: t.Optional["BytecodeCache"] = None,
enable_async: bool = False,
):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
        # passed by keyword rather than position. However, it's important
        # not to change the order of the arguments, because the order is
        # relied on internally in at least these cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined: t.Type[Undefined] = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# configurable policies
self.policies = DEFAULT_POLICIES.copy()
# load extensions
self.extensions = load_extensions(self, extensions)
self.is_async = enable_async
_environment_config_check(self)
def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None:
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes: t.Any) -> None:
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in attributes.items():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(
self,
block_start_string: str = missing,
block_end_string: str = missing,
variable_start_string: str = missing,
variable_end_string: str = missing,
comment_start_string: str = missing,
comment_end_string: str = missing,
line_statement_prefix: t.Optional[str] = missing,
line_comment_prefix: t.Optional[str] = missing,
trim_blocks: bool = missing,
lstrip_blocks: bool = missing,
newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = missing,
keep_trailing_newline: bool = missing,
extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing,
optimized: bool = missing,
undefined: t.Type[Undefined] = missing,
finalize: t.Optional[t.Callable[..., t.Any]] = missing,
autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing,
loader: t.Optional["BaseLoader"] = missing,
cache_size: int = missing,
auto_reload: bool = missing,
bytecode_cache: t.Optional["BytecodeCache"] = missing,
enable_async: bool = False,
) -> "Environment":
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
        up completely. Not all attributes are truly linked; some are just
        copied over, so modifications on the original environment may not
        shine through.
.. versionchanged:: 3.1.2
            Added the ``newline_sequence``, ``keep_trailing_newline``,
and ``enable_async`` parameters to match ``__init__``.
"""
args = dict(locals())
del args["self"], args["cache_size"], args["extensions"], args["enable_async"]
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in args.items():
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in self.extensions.items():
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
if enable_async is not missing:
rv.is_async = enable_async
return _environment_config_check(rv)
@property
def lexer(self) -> Lexer:
"""The lexer for this environment."""
return get_lexer(self)
def iter_extensions(self) -> t.Iterator["Extension"]:
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
def getitem(
self, obj: t.Any, argument: t.Union[str, t.Any]
) -> t.Union[t.Any, Undefined]:
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (AttributeError, TypeError, LookupError):
if isinstance(argument, str):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj: t.Any, attribute: str) -> t.Any:
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a string.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
def _filter_test_common(
self,
name: t.Union[str, Undefined],
value: t.Any,
args: t.Optional[t.Sequence[t.Any]],
kwargs: t.Optional[t.Mapping[str, t.Any]],
context: t.Optional[Context],
eval_ctx: t.Optional[EvalContext],
is_filter: bool,
) -> t.Any:
if is_filter:
env_map = self.filters
type_name = "filter"
else:
env_map = self.tests
type_name = "test"
func = env_map.get(name) # type: ignore
if func is None:
msg = f"No {type_name} named {name!r}."
if isinstance(name, Undefined):
try:
name._fail_with_undefined_error()
except Exception as e:
msg = f"{msg} ({e}; did you forget to quote the callable name?)"
raise TemplateRuntimeError(msg)
args = [value, *(args if args is not None else ())]
kwargs = kwargs if kwargs is not None else {}
pass_arg = _PassArg.from_obj(func)
if pass_arg is _PassArg.context:
if context is None:
raise TemplateRuntimeError(
f"Attempted to invoke a context {type_name} without context."
)
args.insert(0, context)
elif pass_arg is _PassArg.eval_context:
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif pass_arg is _PassArg.environment:
args.insert(0, self)
return func(*args, **kwargs)
def call_filter(
self,
name: str,
value: t.Any,
args: t.Optional[t.Sequence[t.Any]] = None,
kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
context: t.Optional[Context] = None,
eval_ctx: t.Optional[EvalContext] = None,
) -> t.Any:
"""Invoke a filter on a value the same way the compiler does.
This might return a coroutine if the filter is running from an
environment in async mode and the filter supports async
execution. It's your responsibility to await this if needed.
.. versionadded:: 2.7
"""
return self._filter_test_common(
name, value, args, kwargs, context, eval_ctx, True
)
def call_test(
self,
name: str,
value: t.Any,
args: t.Optional[t.Sequence[t.Any]] = None,
kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
context: t.Optional[Context] = None,
eval_ctx: t.Optional[EvalContext] = None,
) -> t.Any:
"""Invoke a test on a value the same way the compiler does.
This might return a coroutine if the test is running from an
environment in async mode and the test supports async execution.
It's your responsibility to await this if needed.
.. versionchanged:: 3.0
Tests support ``@pass_context``, etc. decorators. Added
the ``context`` and ``eval_ctx`` parameters.
.. versionadded:: 2.7
"""
return self._filter_test_common(
name, value, args, kwargs, context, eval_ctx, False
)
@internalcode
def parse(
self,
source: str,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> nodes.Template:
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
self.handle_exception(source=source)
def _parse(
self, source: str, name: t.Optional[str], filename: t.Optional[str]
) -> nodes.Template:
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, filename).parse()
def lex(
self,
source: str,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> t.Iterator[t.Tuple[int, str, str]]:
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = str(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
self.handle_exception(source=source)
def preprocess(
self,
source: str,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> str:
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(
lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(),
str(source),
)
def _tokenize(
self,
source: str,
name: t.Optional[str],
filename: t.Optional[str] = None,
state: t.Optional[str] = None,
) -> TokenStream:
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream) # type: ignore
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename) # type: ignore
return stream
def _generate(
self,
source: nodes.Template,
name: t.Optional[str],
filename: t.Optional[str],
defer_init: bool = False,
) -> str:
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate( # type: ignore
source,
self,
name,
filename,
defer_init=defer_init,
optimized=self.optimized,
)
def _compile(self, source: str, filename: str) -> CodeType:
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, "exec")
@typing.overload
def compile( # type: ignore
self,
source: t.Union[str, nodes.Template],
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
raw: "te.Literal[False]" = False,
defer_init: bool = False,
) -> CodeType:
...
@typing.overload
def compile(
self,
source: t.Union[str, nodes.Template],
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
raw: "te.Literal[True]" = ...,
defer_init: bool = False,
) -> str:
...
@internalcode
def compile(
self,
source: t.Union[str, nodes.Template],
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
raw: bool = False,
defer_init: bool = False,
) -> t.Union[str, CodeType]:
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
        The `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
        `defer_init` is used internally to aid the module code generator. It
        allows the generated code to be imported without the global
        environment variable being set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, str):
source_hint = source
source = self._parse(source, name, filename)
source = self._generate(source, name, filename, defer_init=defer_init)
if raw:
return source
if filename is None:
filename = "<template>"
return self._compile(source, filename)
except TemplateSyntaxError:
self.handle_exception(source=source_hint)
def compile_expression(
self, source: str, undefined_to_none: bool = True
) -> "TemplateExpression":
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state="variable")
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError(
"chunk after expression", parser.stream.current.lineno, None, None
)
expr.set_environment(self)
except TemplateSyntaxError:
self.handle_exception(source=source)
body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(
self,
target: t.Union[str, "os.PathLike[str]"],
extensions: t.Optional[t.Collection[str]] = None,
filter_func: t.Optional[t.Callable[[str], bool]] = None,
zip: t.Optional[str] = "deflated",
log_function: t.Optional[t.Callable[[str], None]] = None,
ignore_errors: bool = True,
) -> None:
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used. To switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
.. versionadded:: 2.4
"""
from .loaders import ModuleLoader
if log_function is None:
def log_function(x: str) -> None:
pass
assert log_function is not None
assert self.loader is not None, "No loader configured."
def write_file(filename: str, data: str) -> None:
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
with open(os.path.join(target, filename), "wb") as f:
f.write(data.encode("utf8"))
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(
target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
)
log_function(f"Compiling into Zip archive {target!r}")
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function(f"Compiling into folder {target!r}")
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function(f'Could not compile "{name}": {e}')
continue
filename = ModuleLoader.get_module_filename(name)
write_file(filename, code)
log_function(f'Compiled "{name}" as {filename}')
finally:
if zip:
zip_file.close()
log_function("Finished compiling templates")
def list_templates(
self,
extensions: t.Optional[t.Collection[str]] = None,
filter_func: t.Optional[t.Callable[[str], bool]] = None,
) -> t.List[str]:
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
assert self.loader is not None, "No loader configured."
names = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError(
"either extensions or filter_func can be passed, but not both"
)
def filter_func(x: str) -> bool:
return "." in x and x.rsplit(".", 1)[1] in extensions
if filter_func is not None:
names = [name for name in names if filter_func(name)]
return names
def handle_exception(self, source: t.Optional[str] = None) -> "te.NoReturn":
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
from .debug import rewrite_traceback_stack
raise rewrite_traceback_stack(source=source)
def join_path(self, template: str, parent: str) -> str:
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(
self, name: str, globals: t.Optional[t.MutableMapping[str, t.Any]]
) -> "Template":
if self.loader is None:
raise TypeError("no loader for this environment specified")
cache_key = (weakref.ref(self.loader), name)
if self.cache is not None:
template = self.cache.get(cache_key)
if template is not None and (
not self.auto_reload or template.is_up_to_date
):
# template.globals is a ChainMap, modifying it will only
# affect the template, not the environment globals.
if globals:
template.globals.update(globals)
return template
template = self.loader.load(self, name, self.make_globals(globals))
if self.cache is not None:
self.cache[cache_key] = template
return template
@internalcode
def get_template(
self,
name: t.Union[str, "Template"],
parent: t.Optional[str] = None,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
"""Load a template by name with :attr:`loader` and return a
:class:`Template`. If the template does not exist a
:exc:`TemplateNotFound` exception is raised.
:param name: Name of the template to load. When loading
templates from the filesystem, "/" is used as the path
separator, even on Windows.
:param parent: The name of the parent template importing this
template. :meth:`join_path` can be used to implement name
transformations with this.
:param globals: Extend the environment :attr:`globals` with
these extra variables available for all renders of this
template. If the template has already been loaded and
cached, its globals are updated with any new items.
.. versionchanged:: 3.0
If a template is loaded from cache, ``globals`` will update
the template's globals instead of ignoring the new values.
.. versionchanged:: 2.4
If ``name`` is a :class:`Template` object it is returned
unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, globals)
@internalcode
def select_template(
self,
names: t.Iterable[t.Union[str, "Template"]],
parent: t.Optional[str] = None,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
"""Like :meth:`get_template`, but tries loading multiple names.
If none of the names can be loaded a :exc:`TemplatesNotFound`
exception is raised.
:param names: List of template names to try loading in order.
:param parent: The name of the parent template importing this
template. :meth:`join_path` can be used to implement name
transformations with this.
:param globals: Extend the environment :attr:`globals` with
these extra variables available for all renders of this
template. If the template has already been loaded and
cached, its globals are updated with any new items.
.. versionchanged:: 3.0
If a template is loaded from cache, ``globals`` will update
the template's globals instead of ignoring the new values.
.. versionchanged:: 2.11
If ``names`` is :class:`Undefined`, an :exc:`UndefinedError`
is raised instead. If no templates were found and ``names``
contains :class:`Undefined`, the message is more helpful.
.. versionchanged:: 2.4
If ``names`` contains a :class:`Template` object it is
returned unchanged.
.. versionadded:: 2.3
"""
if isinstance(names, Undefined):
names._fail_with_undefined_error()
if not names:
raise TemplatesNotFound(
message="Tried to select from an empty list of templates."
)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except (TemplateNotFound, UndefinedError):
pass
raise TemplatesNotFound(names) # type: ignore
@internalcode
def get_or_select_template(
self,
template_name_or_list: t.Union[
str, "Template", t.List[t.Union[str, "Template"]]
],
parent: t.Optional[str] = None,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
"""Use :meth:`select_template` if an iterable of template names
is given, or :meth:`get_template` if one name is given.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, (str, Undefined)):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(
self,
source: t.Union[str, nodes.Template],
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
template_class: t.Optional[t.Type["Template"]] = None,
) -> "Template":
"""Load a template from a source string without using
:attr:`loader`.
:param source: Jinja source to compile into a template.
:param globals: Extend the environment :attr:`globals` with
these extra variables available for all renders of this
template. If the template has already been loaded and
cached, its globals are updated with any new items.
:param template_class: Return an instance of this
:class:`Template` class.
"""
gs = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), gs, None)
def make_globals(
self, d: t.Optional[t.MutableMapping[str, t.Any]]
) -> t.MutableMapping[str, t.Any]:
"""Make the globals map for a template. Any given template
globals overlay the environment :attr:`globals`.
Returns a :class:`collections.ChainMap`. This allows any changes
to a template's globals to only affect that template, while
changes to the environment's globals are still reflected.
However, avoid modifying any globals after a template is loaded.
:param d: Dict of template-specific globals.
.. versionchanged:: 3.0
Use :class:`collections.ChainMap` to always prevent mutating
environment globals.
"""
if d is None:
d = {}
return ChainMap(d, self.globals)
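# --- Editor's example (a sketch, not part of the library): a typical
# Environment setup. The loader mapping, the ``shout`` filter, and the
# template name are illustrative; DictLoader is imported lazily to mirror
# how this module defers loader imports.
def _example_environment_usage() -> str:
    from .loaders import DictLoader

    env = Environment(loader=DictLoader({"hello.txt": "Hello {{ user | shout }}!"}))
    env.filters["shout"] = lambda s: str(s).upper()
    # Overlays share data with the parent but can override options.
    overlay = env.overlay(auto_reload=False)
    assert overlay.linked_to is env
    return env.get_template("hello.txt").render(user="world")  # "Hello WORLD!"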
class Template:
"""A compiled template that can be rendered.
Use the methods on :class:`Environment` to create or load templates.
The environment is used to configure how templates are compiled and
behave.
It is also possible to create a template object directly. This is
not usually recommended. The constructor takes most of the same
arguments as :class:`Environment`. All templates created with the
same environment arguments share the same ephemeral ``Environment``
instance behind the scenes.
A template object should be considered immutable. Modifications on
the object are not supported.
"""
#: Type of environment to create when creating a template directly
#: rather than through an existing environment.
environment_class: t.Type[Environment] = Environment
environment: Environment
globals: t.MutableMapping[str, t.Any]
name: t.Optional[str]
filename: t.Optional[str]
blocks: t.Dict[str, t.Callable[[Context], t.Iterator[str]]]
root_render_func: t.Callable[[Context], t.Iterator[str]]
_module: t.Optional["TemplateModule"]
_debug_info: str
_uptodate: t.Optional[t.Callable[[], bool]]
def __new__(
cls,
source: t.Union[str, nodes.Template],
block_start_string: str = BLOCK_START_STRING,
block_end_string: str = BLOCK_END_STRING,
variable_start_string: str = VARIABLE_START_STRING,
variable_end_string: str = VARIABLE_END_STRING,
comment_start_string: str = COMMENT_START_STRING,
comment_end_string: str = COMMENT_END_STRING,
line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
trim_blocks: bool = TRIM_BLOCKS,
lstrip_blocks: bool = LSTRIP_BLOCKS,
newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
optimized: bool = True,
undefined: t.Type[Undefined] = Undefined,
finalize: t.Optional[t.Callable[..., t.Any]] = None,
autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
enable_async: bool = False,
) -> t.Any: # it returns a `Template`, but this breaks the sphinx build...
env = get_spontaneous_environment(
cls.environment_class, # type: ignore
block_start_string,
block_end_string,
variable_start_string,
variable_end_string,
comment_start_string,
comment_end_string,
line_statement_prefix,
line_comment_prefix,
trim_blocks,
lstrip_blocks,
newline_sequence,
keep_trailing_newline,
frozenset(extensions),
optimized,
undefined, # type: ignore
finalize,
autoescape,
None,
0,
False,
None,
enable_async,
)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(
cls,
environment: Environment,
code: CodeType,
globals: t.MutableMapping[str, t.Any],
uptodate: t.Optional[t.Callable[[], bool]] = None,
) -> "Template":
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {"environment": environment, "__file__": code.co_filename}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(
cls,
environment: Environment,
module_dict: t.MutableMapping[str, t.Any],
globals: t.MutableMapping[str, t.Any],
) -> "Template":
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(
cls,
environment: Environment,
namespace: t.MutableMapping[str, t.Any],
globals: t.MutableMapping[str, t.Any],
) -> "Template":
t: "Template" = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace["name"]
t.filename = namespace["__file__"]
t.blocks = namespace["blocks"]
# render function and module
t.root_render_func = namespace["root"]
t._module = None
# debug and loader helpers
t._debug_info = namespace["debug_info"]
t._uptodate = None
# store the reference
namespace["environment"] = environment
namespace["__jinja_template__"] = t
return t
def render(self, *args: t.Any, **kwargs: t.Any) -> str:
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as a string.
"""
if self.environment.is_async:
import asyncio
close = False
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
close = True
try:
return loop.run_until_complete(self.render_async(*args, **kwargs))
finally:
if close:
loop.close()
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment.concat(self.root_render_func(ctx)) # type: ignore
except Exception:
self.environment.handle_exception()
async def render_async(self, *args: t.Any, **kwargs: t.Any) -> str:
"""This works similar to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled.
Example usage::
await template.render_async(knights='that say nih; asynchronously')
"""
if not self.environment.is_async:
raise RuntimeError(
"The environment was not created with async mode enabled."
)
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment.concat( # type: ignore
[n async for n in self.root_render_func(ctx)] # type: ignore
)
except Exception:
return self.environment.handle_exception()
def stream(self, *args: t.Any, **kwargs: t.Any) -> "TemplateStream":
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args: t.Any, **kwargs: t.Any) -> t.Iterator[str]:
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as strings.
It accepts the same arguments as :meth:`render`.
"""
if self.environment.is_async:
import asyncio
async def to_list() -> t.List[str]:
return [x async for x in self.generate_async(*args, **kwargs)]
yield from asyncio.run(to_list())
return
ctx = self.new_context(dict(*args, **kwargs))
try:
yield from self.root_render_func(ctx)
except Exception:
yield self.environment.handle_exception()
async def generate_async(
self, *args: t.Any, **kwargs: t.Any
) -> t.AsyncIterator[str]:
"""An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
"""
if not self.environment.is_async:
raise RuntimeError(
"The environment was not created with async mode enabled."
)
ctx = self.new_context(dict(*args, **kwargs))
try:
async for event in self.root_render_func(ctx): # type: ignore
yield event
except Exception:
yield self.environment.handle_exception()
def new_context(
self,
vars: t.Optional[t.Dict[str, t.Any]] = None,
shared: bool = False,
locals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> Context:
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(
self.environment, self.name, self.blocks, vars, shared, self.globals, locals
)
def make_module(
self,
vars: t.Optional[t.Dict[str, t.Any]] = None,
shared: bool = False,
locals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> "TemplateModule":
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
ctx = self.new_context(vars, shared, locals)
return TemplateModule(self, ctx)
async def make_module_async(
self,
vars: t.Optional[t.Dict[str, t.Any]] = None,
shared: bool = False,
locals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> "TemplateModule":
"""As template module creation can invoke template code for
asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
"""
ctx = self.new_context(vars, shared, locals)
return TemplateModule(
self, ctx, [x async for x in self.root_render_func(ctx)] # type: ignore
)
@internalcode
def _get_default_module(self, ctx: t.Optional[Context] = None) -> "TemplateModule":
"""If a context is passed in, this means that the template was
imported. Imported templates have access to the current
template's globals by default, but they can only be accessed via
the context during runtime.
If there are new globals, we need to create a new module because
the cached module is already rendered and will not have access
to globals from the current context. This new module is not
cached because the template can be imported elsewhere, and it
should have access to only the current template's globals.
"""
if self.environment.is_async:
raise RuntimeError("Module is not available in async mode.")
if ctx is not None:
keys = ctx.globals_keys - self.globals.keys()
if keys:
return self.make_module({k: ctx.parent[k] for k in keys})
if self._module is None:
self._module = self.make_module()
return self._module
async def _get_default_module_async(
self, ctx: t.Optional[Context] = None
) -> "TemplateModule":
if ctx is not None:
keys = ctx.globals_keys - self.globals.keys()
if keys:
return await self.make_module_async({k: ctx.parent[k] for k in keys})
if self._module is None:
self._module = await self.make_module_async()
return self._module
@property
def module(self) -> "TemplateModule":
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> str(t.module)
'23'
        >>> t.module.foo() == '42'
True
This attribute is not available if async mode is enabled.
"""
return self._get_default_module()
def get_corresponding_lineno(self, lineno: int) -> int:
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self) -> bool:
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self) -> t.List[t.Tuple[int, int]]:
"""The debug info mapping."""
if self._debug_info:
return [
tuple(map(int, x.split("="))) # type: ignore
for x in self._debug_info.split("&")
]
return []
def __repr__(self) -> str:
if self.name is None:
name = f"memory:{id(self):x}"
else:
name = repr(self.name)
return f"<{type(self).__name__} {name}>"
class TemplateModule:
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into a string renders the contents.
"""
def __init__(
self,
template: Template,
context: Context,
body_stream: t.Optional[t.Iterable[str]] = None,
) -> None:
if body_stream is None:
if context.environment.is_async:
raise RuntimeError(
"Async mode requires a body stream to be passed to"
" a template module. Use the async methods of the"
" API you are using."
)
body_stream = list(template.root_render_func(context))
self._body_stream = body_stream
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self) -> Markup:
return Markup(concat(self._body_stream))
def __str__(self) -> str:
return concat(self._body_stream)
def __repr__(self) -> str:
if self.__name__ is None:
name = f"memory:{id(self):x}"
else:
name = repr(self.__name__)
return f"<{type(self).__name__} {name}>"
class TemplateExpression:
"""The :meth:`jinja2.Environment.compile_expression` method returns an
    instance of this object. It provides expression-like access to the
    template that wraps the compiled expression.
"""
def __init__(self, template: Template, undefined_to_none: bool) -> None:
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Optional[t.Any]:
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars["result"]
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
class TemplateStream:
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen: t.Iterator[str]) -> None:
self._gen = gen
self.disable_buffering()
def dump(
self,
fp: t.Union[str, t.IO[t.Any]],
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
) -> None:
"""Dump the complete stream into a file or file-like object.
        By default strings are written; if you want to encode
        before writing, specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, str):
if encoding is None:
encoding = "utf-8"
fp = open(fp, "wb")
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self) # type: ignore
else:
iterable = self # type: ignore
if hasattr(fp, "writelines"):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self) -> None:
"""Disable the output buffering."""
self._next = partial(next, self._gen)
self.buffered = False
def _buffered_generator(self, size: int) -> t.Iterator[str]:
buf: t.List[str] = []
c_size = 0
push = buf.append
while True:
try:
while c_size < size:
c = next(self._gen)
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
def enable_buffering(self, size: int = 5) -> None:
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError("buffer size too small")
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
def __iter__(self) -> "TemplateStream":
return self
def __next__(self) -> str:
return self._next() # type: ignore
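# A minimal sketch of buffered streaming (illustrative, assuming a small
# inline template; enable_buffering combines up to five items per yield):
#
# >>> from jinja2 import Template
# >>> stream = Template("Hello {{ name }}!").stream(name="World")
# >>> stream.enable_buffering(5)
# >>> "".join(stream)
# 'Hello World!'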
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| 61,362 | 35.744311 | 88 | py |
jinja | jinja-main/src/jinja2/exceptions.py | import typing as t
if t.TYPE_CHECKING:
from .runtime import Undefined
class TemplateError(Exception):
"""Baseclass for all template errors."""
def __init__(self, message: t.Optional[str] = None) -> None:
super().__init__(message)
@property
def message(self) -> t.Optional[str]:
return self.args[0] if self.args else None
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist.
.. versionchanged:: 2.11
If the given name is :class:`Undefined` and no message was
provided, an :exc:`UndefinedError` is raised.
"""
# Silence the Python warning about message being deprecated since
# it's not valid here.
message: t.Optional[str] = None
def __init__(
self,
name: t.Optional[t.Union[str, "Undefined"]],
message: t.Optional[str] = None,
) -> None:
IOError.__init__(self, name)
if message is None:
from .runtime import Undefined
if isinstance(name, Undefined):
name._fail_with_undefined_error()
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self) -> str:
return str(self.message)
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionchanged:: 2.11
If a name in the list of names is :class:`Undefined`, a message
about it being undefined is shown rather than the empty string.
.. versionadded:: 2.2
"""
def __init__(
self,
names: t.Sequence[t.Union[str, "Undefined"]] = (),
message: t.Optional[str] = None,
) -> None:
if message is None:
from .runtime import Undefined
parts = []
for name in names:
if isinstance(name, Undefined):
parts.append(name._undefined_message)
else:
parts.append(name)
parts_str = ", ".join(map(str, parts))
message = f"none of the templates given were found: {parts_str}"
super().__init__(names[-1] if names else None, message)
self.templates = list(names)
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(
self,
message: str,
lineno: int,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> None:
super().__init__(message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source: t.Optional[str] = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self) -> str:
# for translated errors we only return the message
if self.translated:
return t.cast(str, self.message)
# otherwise attach some stuff
location = f"line {self.lineno}"
name = self.filename or self.name
if name:
location = f'File "{name}", {location}'
lines = [t.cast(str, self.message), " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
pass
else:
lines.append(" " + line.strip())
return "\n".join(lines)
def __reduce__(self): # type: ignore
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.message, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
| 5,071 | 29.371257 | 84 | py |
jinja | jinja-main/src/jinja2/constants.py | #: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = """\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate"""
| 1,433 | 67.285714 | 79 | py |
jinja | jinja-main/src/jinja2/lexer.py | """Implements a Jinja / Python combination lexer. The ``Lexer`` class
is used to do some preprocessing. It filters out invalid operators like
the bitshift operators we don't allow in templates. It separates
template code and python code in expressions.
"""
import re
import typing as t
from ast import literal_eval
from collections import deque
from sys import intern
from ._identifier import pattern as name_re
from .exceptions import TemplateSyntaxError
from .utils import LRUCache
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache: t.MutableMapping[t.Tuple, "Lexer"] = LRUCache(50) # type: ignore
# static regular expressions
whitespace_re = re.compile(r"\s+")
newline_re = re.compile(r"(\r\n|\r|\n)")
string_re = re.compile(
r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
)
integer_re = re.compile(
r"""
(
0b(_?[0-1])+ # binary
|
0o(_?[0-7])+ # octal
|
0x(_?[\da-f])+ # hex
|
[1-9](_?\d)* # decimal
|
0(_?0)* # decimal zero
)
""",
re.IGNORECASE | re.VERBOSE,
)
float_re = re.compile(
r"""
(?<!\.) # doesn't start with a .
(\d+_)*\d+ # digits, possibly _ separated
(
(\.(\d+_)*\d+)? # optional fractional part
e[+\-]?(\d+_)*\d+ # exponent part
|
\.(\d+_)*\d+ # required fractional part
)
""",
re.IGNORECASE | re.VERBOSE,
)
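# A minimal sketch of how these literal forms surface in templates
# (illustrative, assuming a default Environment; "_" separators and base
# prefixes are parsed by the wrap() conversion further below):
#
# >>> from jinja2 import Environment
# >>> Environment().from_string("{{ 1_000 + 0x10 }}").render()
# '1016'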
# intern the tokens and keep references to them
TOKEN_ADD = intern("add")
TOKEN_ASSIGN = intern("assign")
TOKEN_COLON = intern("colon")
TOKEN_COMMA = intern("comma")
TOKEN_DIV = intern("div")
TOKEN_DOT = intern("dot")
TOKEN_EQ = intern("eq")
TOKEN_FLOORDIV = intern("floordiv")
TOKEN_GT = intern("gt")
TOKEN_GTEQ = intern("gteq")
TOKEN_LBRACE = intern("lbrace")
TOKEN_LBRACKET = intern("lbracket")
TOKEN_LPAREN = intern("lparen")
TOKEN_LT = intern("lt")
TOKEN_LTEQ = intern("lteq")
TOKEN_MOD = intern("mod")
TOKEN_MUL = intern("mul")
TOKEN_NE = intern("ne")
TOKEN_PIPE = intern("pipe")
TOKEN_POW = intern("pow")
TOKEN_RBRACE = intern("rbrace")
TOKEN_RBRACKET = intern("rbracket")
TOKEN_RPAREN = intern("rparen")
TOKEN_SEMICOLON = intern("semicolon")
TOKEN_SUB = intern("sub")
TOKEN_TILDE = intern("tilde")
TOKEN_WHITESPACE = intern("whitespace")
TOKEN_FLOAT = intern("float")
TOKEN_INTEGER = intern("integer")
TOKEN_NAME = intern("name")
TOKEN_STRING = intern("string")
TOKEN_OPERATOR = intern("operator")
TOKEN_BLOCK_BEGIN = intern("block_begin")
TOKEN_BLOCK_END = intern("block_end")
TOKEN_VARIABLE_BEGIN = intern("variable_begin")
TOKEN_VARIABLE_END = intern("variable_end")
TOKEN_RAW_BEGIN = intern("raw_begin")
TOKEN_RAW_END = intern("raw_end")
TOKEN_COMMENT_BEGIN = intern("comment_begin")
TOKEN_COMMENT_END = intern("comment_end")
TOKEN_COMMENT = intern("comment")
TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
TOKEN_LINESTATEMENT_END = intern("linestatement_end")
TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
TOKEN_LINECOMMENT_END = intern("linecomment_end")
TOKEN_LINECOMMENT = intern("linecomment")
TOKEN_DATA = intern("data")
TOKEN_INITIAL = intern("initial")
TOKEN_EOF = intern("eof")
# bind operators to token types
operators = {
"+": TOKEN_ADD,
"-": TOKEN_SUB,
"/": TOKEN_DIV,
"//": TOKEN_FLOORDIV,
"*": TOKEN_MUL,
"%": TOKEN_MOD,
"**": TOKEN_POW,
"~": TOKEN_TILDE,
"[": TOKEN_LBRACKET,
"]": TOKEN_RBRACKET,
"(": TOKEN_LPAREN,
")": TOKEN_RPAREN,
"{": TOKEN_LBRACE,
"}": TOKEN_RBRACE,
"==": TOKEN_EQ,
"!=": TOKEN_NE,
">": TOKEN_GT,
">=": TOKEN_GTEQ,
"<": TOKEN_LT,
"<=": TOKEN_LTEQ,
"=": TOKEN_ASSIGN,
".": TOKEN_DOT,
":": TOKEN_COLON,
"|": TOKEN_PIPE,
",": TOKEN_COMMA,
";": TOKEN_SEMICOLON,
}
reverse_operators = {v: k for k, v in operators.items()}
assert len(operators) == len(reverse_operators), "operators dropped"
operator_re = re.compile(
f"({'|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))})"
)
ignored_tokens = frozenset(
[
TOKEN_COMMENT_BEGIN,
TOKEN_COMMENT,
TOKEN_COMMENT_END,
TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN,
TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT,
]
)
ignore_if_empty = frozenset(
[TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
)
def _describe_token_type(token_type: str) -> str:
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
TOKEN_COMMENT_BEGIN: "begin of comment",
TOKEN_COMMENT_END: "end of comment",
TOKEN_COMMENT: "comment",
TOKEN_LINECOMMENT: "comment",
TOKEN_BLOCK_BEGIN: "begin of statement block",
TOKEN_BLOCK_END: "end of statement block",
TOKEN_VARIABLE_BEGIN: "begin of print statement",
TOKEN_VARIABLE_END: "end of print statement",
TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
TOKEN_LINESTATEMENT_END: "end of line statement",
TOKEN_DATA: "template data / text",
TOKEN_EOF: "end of template",
}.get(token_type, token_type)
def describe_token(token: "Token") -> str:
"""Returns a description of the token."""
if token.type == TOKEN_NAME:
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr: str) -> str:
"""Like `describe_token` but for token expressions."""
if ":" in expr:
type, value = expr.split(":", 1)
if type == TOKEN_NAME:
return value
else:
type = expr
return _describe_token_type(type)
def count_newlines(value: str) -> int:
"""Count the number of newline characters in the string. This is
useful for extensions that filter a stream.
"""
return len(newline_re.findall(value))
def compile_rules(environment: "Environment") -> t.List[t.Tuple[str, str]]:
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
(
len(environment.comment_start_string),
TOKEN_COMMENT_BEGIN,
e(environment.comment_start_string),
),
(
len(environment.block_start_string),
TOKEN_BLOCK_BEGIN,
e(environment.block_start_string),
),
(
len(environment.variable_start_string),
TOKEN_VARIABLE_BEGIN,
e(environment.variable_start_string),
),
]
if environment.line_statement_prefix is not None:
rules.append(
(
len(environment.line_statement_prefix),
TOKEN_LINESTATEMENT_BEGIN,
r"^[ \t\v]*" + e(environment.line_statement_prefix),
)
)
if environment.line_comment_prefix is not None:
rules.append(
(
len(environment.line_comment_prefix),
TOKEN_LINECOMMENT_BEGIN,
r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
)
)
return [x[1:] for x in sorted(rules, reverse=True)]
class Failure:
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
def __init__(
self, message: str, cls: t.Type[TemplateSyntaxError] = TemplateSyntaxError
) -> None:
self.message = message
self.error_class = cls
def __call__(self, lineno: int, filename: str) -> "te.NoReturn":
raise self.error_class(self.message, lineno, filename)
class Token(t.NamedTuple):
lineno: int
type: str
value: str
def __str__(self) -> str:
return describe_token(self)
def test(self, expr: str) -> bool:
"""Test a token against a token expression. This can either be a
token type or ``'token_type:token_value'``. This can only test
against string values and types.
"""
# here we do a regular string equality check as test_any is usually
        # passed an iterable of non-interned strings.
if self.type == expr:
return True
if ":" in expr:
return expr.split(":", 1) == [self.type, self.value]
return False
def test_any(self, *iterable: str) -> bool:
"""Test against multiple token expressions."""
return any(self.test(expr) for expr in iterable)
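# A minimal sketch of token tests (illustrative, using the fields above):
#
# >>> from jinja2.lexer import Token
# >>> tok = Token(1, "name", "foo")
# >>> tok.test("name"), tok.test("name:foo"), tok.test("name:bar")
# (True, True, False)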
class TokenStreamIterator:
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
def __init__(self, stream: "TokenStream") -> None:
self.stream = stream
def __iter__(self) -> "TokenStreamIterator":
return self
def __next__(self) -> Token:
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration
next(self.stream)
return token
class TokenStream:
"""A token stream is an iterable that yields :class:`Token`\\s. The
parser however does not iterate over it but calls :meth:`next` to go
one token ahead. The current active token is stored as :attr:`current`.
"""
def __init__(
self,
generator: t.Iterable[Token],
name: t.Optional[str],
filename: t.Optional[str],
):
self._iter = iter(generator)
self._pushed: "te.Deque[Token]" = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, "")
next(self)
def __iter__(self) -> TokenStreamIterator:
return TokenStreamIterator(self)
def __bool__(self) -> bool:
return bool(self._pushed) or self.current.type is not TOKEN_EOF
@property
def eos(self) -> bool:
"""Are we at the end of the stream?"""
return not self
def push(self, token: Token) -> None:
"""Push a token back to the stream."""
self._pushed.append(token)
def look(self) -> Token:
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result
def skip(self, n: int = 1) -> None:
"""Got n tokens ahead."""
for _ in range(n):
next(self)
def next_if(self, expr: str) -> t.Optional[Token]:
"""Perform the token test and return the token if it matched.
Otherwise the return value is `None`.
"""
if self.current.test(expr):
return next(self)
return None
def skip_if(self, expr: str) -> bool:
"""Like :meth:`next_if` but only returns `True` or `False`."""
return self.next_if(expr) is not None
def __next__(self) -> Token:
"""Go one token ahead and return the old one.
Use the built-in :func:`next` instead of calling this directly.
"""
rv = self.current
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
try:
self.current = next(self._iter)
except StopIteration:
self.close()
return rv
def close(self) -> None:
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, "")
self._iter = iter(())
self.closed = True
def expect(self, expr: str) -> Token:
"""Expect a given token type and return it. This accepts the same
argument as :meth:`jinja2.lexer.Token.test`.
"""
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError(
f"unexpected end of template, expected {expr!r}.",
self.current.lineno,
self.name,
self.filename,
)
raise TemplateSyntaxError(
f"expected token {expr!r}, got {describe_token(self.current)!r}",
self.current.lineno,
self.name,
self.filename,
)
return next(self)
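# A minimal sketch of obtaining a TokenStream (illustrative, assuming a
# default Environment; whitespace and other ignored tokens are filtered
# out by Lexer.wrap, and iteration stops before the eof token):
#
# >>> from jinja2 import Environment
# >>> stream = Environment().lexer.tokenize("{{ a }}")
# >>> [t.type for t in stream]
# ['variable_begin', 'name', 'variable_end']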
def get_lexer(environment: "Environment") -> "Lexer":
"""Return a lexer which is probably cached."""
key = (
environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.lstrip_blocks,
environment.newline_sequence,
environment.keep_trailing_newline,
)
lexer = _lexer_cache.get(key)
if lexer is None:
_lexer_cache[key] = lexer = Lexer(environment)
return lexer
class OptionalLStrip(tuple): # type: ignore[type-arg]
"""A special tuple for marking a point in the state that can have
lstrip applied.
"""
__slots__ = ()
# Even though it looks like a no-op, creating instances fails
# without this.
def __new__(cls, *members, **kwargs): # type: ignore
return super().__new__(cls, members)
class _Rule(t.NamedTuple):
pattern: t.Pattern[str]
tokens: t.Union[str, t.Tuple[str, ...], t.Tuple[Failure]]
command: t.Optional[str]
class Lexer:
"""Class that implements a lexer for a given environment. Automatically
created by the environment class, usually you don't have to do that.
Note that the lexer is not automatically bound to an environment.
Multiple environments can share the same lexer.
"""
def __init__(self, environment: "Environment") -> None:
# shortcuts
e = re.escape
def c(x: str) -> t.Pattern[str]:
return re.compile(x, re.M | re.S)
# lexing rules for tags
tag_rules: t.List[_Rule] = [
_Rule(whitespace_re, TOKEN_WHITESPACE, None),
_Rule(float_re, TOKEN_FLOAT, None),
_Rule(integer_re, TOKEN_INTEGER, None),
_Rule(name_re, TOKEN_NAME, None),
_Rule(string_re, TOKEN_STRING, None),
_Rule(operator_re, TOKEN_OPERATOR, None),
]
# assemble the root lexing rule. because "|" is ungreedy
# we have to sort by length so that the lexer continues working
# as expected when we have parsing rules like <% for block and
# <%= for variables. (if someone wants asp like syntax)
# variables are just part of the rules if variable processing
# is required.
root_tag_rules = compile_rules(environment)
block_start_re = e(environment.block_start_string)
block_end_re = e(environment.block_end_string)
comment_end_re = e(environment.comment_end_string)
variable_end_re = e(environment.variable_end_string)
# block suffix if trimming is enabled
block_suffix_re = "\\n?" if environment.trim_blocks else ""
self.lstrip_blocks = environment.lstrip_blocks
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
root_raw_re = (
rf"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
rf"(?:\-{block_end_re}\s*|{block_end_re}))"
)
root_parts_re = "|".join(
[root_raw_re] + [rf"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
)
# global lexing rules
self.rules: t.Dict[str, t.List[_Rule]] = {
"root": [
# directives
_Rule(
c(rf"(.*?)(?:{root_parts_re})"),
OptionalLStrip(TOKEN_DATA, "#bygroup"), # type: ignore
"#bygroup",
),
# data
_Rule(c(".+"), TOKEN_DATA, None),
],
# comments
TOKEN_COMMENT_BEGIN: [
_Rule(
c(
rf"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
rf"|{comment_end_re}{block_suffix_re}))"
),
(TOKEN_COMMENT, TOKEN_COMMENT_END),
"#pop",
),
_Rule(c(r"(.)"), (Failure("Missing end of comment tag"),), None),
],
# blocks
TOKEN_BLOCK_BEGIN: [
_Rule(
c(
rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
rf"|{block_end_re}{block_suffix_re})"
),
TOKEN_BLOCK_END,
"#pop",
),
]
+ tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
_Rule(
c(rf"\-{variable_end_re}\s*|{variable_end_re}"),
TOKEN_VARIABLE_END,
"#pop",
)
]
+ tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
_Rule(
c(
rf"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
rf"(?:\+{block_end_re}|\-{block_end_re}\s*"
rf"|{block_end_re}{block_suffix_re}))"
),
OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), # type: ignore
"#pop",
),
_Rule(c(r"(.)"), (Failure("Missing end of raw directive"),), None),
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
_Rule(c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
]
+ tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
_Rule(
c(r"(.*?)()(?=\n|$)"),
(TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
"#pop",
)
],
}
def _normalize_newlines(self, value: str) -> str:
"""Replace all newlines with the configured sequence in strings
and template data.
"""
return newline_re.sub(self.newline_sequence, value)
def tokenize(
self,
source: str,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
state: t.Optional[str] = None,
) -> TokenStream:
"""Calls tokeniter + tokenize and wraps it in a token stream."""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
def wrap(
self,
stream: t.Iterable[t.Tuple[int, str, str]],
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> t.Iterator[Token]:
"""This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value.
"""
for lineno, token, value_str in stream:
if token in ignored_tokens:
continue
value: t.Any = value_str
if token == TOKEN_LINESTATEMENT_BEGIN:
token = TOKEN_BLOCK_BEGIN
elif token == TOKEN_LINESTATEMENT_END:
token = TOKEN_BLOCK_END
# we are not interested in those tokens in the parser
elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
continue
elif token == TOKEN_DATA:
value = self._normalize_newlines(value_str)
elif token == "keyword":
token = value_str
elif token == TOKEN_NAME:
value = value_str
if not value.isidentifier():
raise TemplateSyntaxError(
"Invalid character in identifier", lineno, name, filename
)
elif token == TOKEN_STRING:
# try to unescape string
try:
value = (
self._normalize_newlines(value_str[1:-1])
.encode("ascii", "backslashreplace")
.decode("unicode-escape")
)
except Exception as e:
msg = str(e).split(":")[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename) from e
elif token == TOKEN_INTEGER:
value = int(value_str.replace("_", ""), 0)
elif token == TOKEN_FLOAT:
# remove all "_" first to support more Python versions
value = literal_eval(value_str.replace("_", ""))
elif token == TOKEN_OPERATOR:
token = operators[value_str]
yield Token(lineno, token, value)
def tokeniter(
self,
source: str,
name: t.Optional[str],
filename: t.Optional[str] = None,
state: t.Optional[str] = None,
) -> t.Iterator[t.Tuple[int, str, str]]:
"""This method tokenizes the text and returns the tokens in a
generator. Use this method if you just want to tokenize a template.
.. versionchanged:: 3.0
Only ``\\n``, ``\\r\\n`` and ``\\r`` are treated as line
breaks.
"""
lines = newline_re.split(source)[::2]
if not self.keep_trailing_newline and lines[-1] == "":
del lines[-1]
source = "\n".join(lines)
pos = 0
lineno = 1
stack = ["root"]
if state is not None and state != "root":
assert state in ("variable", "block"), "invalid state"
stack.append(state + "_begin")
statetokens = self.rules[stack[-1]]
source_length = len(source)
balancing_stack: t.List[str] = []
newlines_stripped = 0
line_starting = True
while True:
# tokenizer loop
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
# if no match we try again with the next rule
if m is None:
continue
# we only match blocks and variables if braces / parentheses
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
if balancing_stack and tokens in (
TOKEN_VARIABLE_END,
TOKEN_BLOCK_END,
TOKEN_LINESTATEMENT_END,
):
continue
# tuples support more options
if isinstance(tokens, tuple):
groups: t.Sequence[str] = m.groups()
if isinstance(tokens, OptionalLStrip):
# Rule supports lstrip. Match will look like
# text, block type, whitespace control, type, control, ...
text = groups[0]
# Skipping the text and first type, every other group is the
# whitespace control for each type. One of the groups will be
# -, +, or empty string instead of None.
strip_sign = next(g for g in groups[2::2] if g is not None)
if strip_sign == "-":
# Strip all whitespace between the text and the tag.
stripped = text.rstrip()
newlines_stripped = text[len(stripped) :].count("\n")
groups = [stripped, *groups[1:]]
elif (
# Not marked for preserving whitespace.
strip_sign != "+"
# lstrip is enabled.
and self.lstrip_blocks
# Not a variable expression.
and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
):
# The start of text between the last newline and the tag.
l_pos = text.rfind("\n") + 1
if l_pos > 0 or line_starting:
# If there's only whitespace between the newline and the
# tag, strip it.
if whitespace_re.fullmatch(text, l_pos):
groups = [text[:l_pos], *groups[1:]]
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
raise token(lineno, filename)
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
elif token == "#bygroup":
for key, value in m.groupdict().items():
if value is not None:
yield lineno, key, value
lineno += value.count("\n")
break
else:
raise RuntimeError(
f"{regex!r} wanted to resolve the token dynamically"
" but no group matched"
)
# normal group
else:
data = groups[idx]
if data or token not in ignore_if_empty:
yield lineno, token, data
lineno += data.count("\n") + newlines_stripped
newlines_stripped = 0
                # plain string tokens are just yielded as-is.
else:
data = m.group()
# update brace/parentheses balance
if tokens == TOKEN_OPERATOR:
if data == "{":
balancing_stack.append("}")
elif data == "(":
balancing_stack.append(")")
elif data == "[":
balancing_stack.append("]")
elif data in ("}", ")", "]"):
if not balancing_stack:
raise TemplateSyntaxError(
f"unexpected '{data}'", lineno, name, filename
)
expected_op = balancing_stack.pop()
if expected_op != data:
raise TemplateSyntaxError(
f"unexpected '{data}', expected '{expected_op}'",
lineno,
name,
filename,
)
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
lineno += data.count("\n")
line_starting = m.group()[-1:] == "\n"
# fetch new position into new variable so that we can check
            # if there is an internal parsing error which would result
# in an infinite loop
pos2 = m.end()
# handle state changes
if new_state is not None:
# remove the uppermost state
if new_state == "#pop":
stack.pop()
# resolve the new state by group checking
elif new_state == "#bygroup":
for key, value in m.groupdict().items():
if value is not None:
stack.append(key)
break
else:
raise RuntimeError(
f"{regex!r} wanted to resolve the new state dynamically"
f" but no group matched"
)
# direct state name given
else:
stack.append(new_state)
statetokens = self.rules[stack[-1]]
# we are still at the same position and no stack change.
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
raise RuntimeError(
f"{regex!r} yielded empty string without stack change"
)
            # advance to the new position and start again
pos = pos2
break
# if loop terminated without break we haven't found a single match
# either we are at the end of the file or we have a problem
else:
# end of text
if pos >= source_length:
return
# something went wrong
raise TemplateSyntaxError(
f"unexpected char {source[pos]!r} at {pos}", lineno, name, filename
)
| 29,752 | 33.317186 | 88 | py |
jinja | jinja-main/src/jinja2/nativetypes.py | import typing as t
from ast import literal_eval
from ast import parse
from itertools import chain
from itertools import islice
from types import GeneratorType
from . import nodes
from .compiler import CodeGenerator
from .compiler import Frame
from .compiler import has_safe_repr
from .environment import Environment
from .environment import Template
def native_concat(values: t.Iterable[t.Any]) -> t.Optional[t.Any]:
"""Return a native Python type from the list of compiled nodes. If
the result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise,
the string is returned.
:param values: Iterable of outputs to concatenate.
"""
head = list(islice(values, 2))
if not head:
return None
if len(head) == 1:
raw = head[0]
if not isinstance(raw, str):
return raw
else:
if isinstance(values, GeneratorType):
values = chain(head, values)
raw = "".join([str(v) for v in values])
try:
return literal_eval(
# In Python 3.10+ ast.literal_eval removes leading spaces/tabs
# from the given string. For backwards compatibility we need to
# parse the string ourselves without removing leading spaces/tabs.
parse(raw, mode="eval")
)
except (ValueError, SyntaxError, MemoryError):
return raw
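# A minimal sketch of native rendering (illustrative; NativeEnvironment is
# defined further below in this module):
#
# >>> from jinja2.nativetypes import NativeEnvironment
# >>> t = NativeEnvironment().from_string("{{ x + 1 }}")
# >>> v = t.render(x=41)
# >>> v, type(v).__name__
# (42, 'int')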
class NativeCodeGenerator(CodeGenerator):
"""A code generator which renders Python types by not adding
``str()`` around output nodes.
"""
@staticmethod
def _default_finalize(value: t.Any) -> t.Any:
return value
def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
return repr("".join([str(v) for v in group]))
def _output_child_to_const(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> t.Any:
const = node.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
if isinstance(node, nodes.TemplateData):
return const
return finalize.const(const) # type: ignore
def _output_child_pre(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(finalize.src)
def _output_child_post(
self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
) -> None:
if finalize.src is not None:
self.write(")")
class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
concat = staticmethod(native_concat) # type: ignore
class NativeTemplate(Template):
environment_class = NativeEnvironment
def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Render the template to produce a native Python type. If the
result is a single node, its value is returned. Otherwise, the
nodes are concatenated as strings. If the result can be parsed
with :func:`ast.literal_eval`, the parsed value is returned.
Otherwise, the string is returned.
"""
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
self.root_render_func(ctx)
)
except Exception:
return self.environment.handle_exception()
async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
if not self.environment.is_async:
raise RuntimeError(
"The environment was not created with async mode enabled."
)
ctx = self.new_context(dict(*args, **kwargs))
try:
return self.environment_class.concat( # type: ignore
[n async for n in self.root_render_func(ctx)] # type: ignore
)
except Exception:
return self.environment.handle_exception()
NativeEnvironment.template_class = NativeTemplate
| 4,210 | 31.145038 | 83 | py |
jinja | jinja-main/src/jinja2/idtracking.py | import typing as t
from . import nodes
from .visitor import NodeVisitor
VAR_LOAD_PARAMETER = "param"
VAR_LOAD_RESOLVE = "resolve"
VAR_LOAD_ALIAS = "alias"
VAR_LOAD_UNDEFINED = "undefined"
def find_symbols(
nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
visitor.visit(node)
return sym
def symbols_for_node(
node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None
) -> "Symbols":
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
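# A minimal sketch of symbol analysis on a parsed template (illustrative;
# this is an internal API, shown here with a default Environment):
#
# >>> from jinja2 import Environment
# >>> from jinja2.idtracking import symbols_for_node
# >>> tree = Environment().parse("{% set x = 1 %}{{ x }}{{ y }}")
# >>> sorted(symbols_for_node(tree).stores)
# ['x']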
class Symbols:
def __init__(
self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
) -> None:
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level: int = level
self.parent = parent
self.refs: t.Dict[str, str] = {}
self.loads: t.Dict[str, t.Any] = {}
self.stores: t.Set[str] = set()
def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(
self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
) -> str:
ident = f"l_{self.level}_{name}"
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target: str) -> t.Optional[t.Any]:
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
return None
def find_ref(self, name: str) -> t.Optional[str]:
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
return None
def ref(self, name: str) -> str:
rv = self.find_ref(name)
if rv is None:
raise AssertionError(
"Tried to resolve a name to a reference that was"
f" unknown to the frame ({name!r})"
)
return rv
def copy(self) -> "Symbols":
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name: str) -> None:
self.stores.add(name)
        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name: str) -> str:
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name: str) -> None:
if self.find_ref(name) is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
stores: t.Dict[str, int] = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
continue
stores[target] = stores.get(target, 0) + 1
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name, branch_count in stores.items():
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name) # type: ignore
assert target is not None, "should not happen"
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self) -> t.Dict[str, str]:
rv: t.Dict[str, str] = {}
node: t.Optional["Symbols"] = self
while node is not None:
for name in sorted(node.stores):
if name not in rv:
rv[name] = self.find_ref(name) # type: ignore
node = node.parent
return rv
def dump_param_targets(self) -> t.Set[str]:
rv = set()
node: t.Optional["Symbols"] = self
while node is not None:
for target, (instr, _) in self.loads.items():
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
class RootVisitor(NodeVisitor):
def __init__(self, symbols: "Symbols") -> None:
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = _simple_visit
visit_Block = _simple_visit
visit_Macro = _simple_visit
visit_FilterBlock = _simple_visit
visit_Scope = _simple_visit
visit_If = _simple_visit
visit_ScopedEvalContextModifier = _simple_visit
def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(
self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
) -> None:
if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == "else":
branch = node.else_
elif for_branch == "test":
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError("Unknown for branch")
if branch:
for item in branch:
self.sym_visitor.visit(item)
def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, symbols: "Symbols") -> None:
self.symbols = symbols
def visit_Name(
self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
) -> None:
"""All assignments to names go through this function."""
if store_as_param or node.ctx == "param":
self.symbols.declare_parameter(node.name)
elif node.ctx == "store":
self.symbols.store(node.name)
elif node.ctx == "load":
self.symbols.load(node.name)
def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
self.symbols.load(node.name)
def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
self.visit(node.test, **kwargs)
original_symbols = self.symbols
def inner_visit(nodes: t.Iterable[nodes.Node]) -> "Symbols":
self.symbols = rv = original_symbols.copy()
for subnode in nodes:
self.visit(subnode, **kwargs)
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
self.symbols.store(node.name)
def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
self.generic_visit(node, **kwargs)
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
self.visit(node.call, **kwargs)
def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
self.visit(node.filter, **kwargs)
def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
for target in node.values:
self.visit(target)
def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
"""Stop visiting at scopes."""
def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
"""Stop visiting at blocks."""
def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
"""Do not visit into overlay scopes."""
| 10,704 | 32.557994 | 85 | py |
jinja | jinja-main/src/jinja2/utils.py | import enum
import json
import os
import re
import typing as t
from collections import abc
from collections import deque
from random import choice
from random import randrange
from threading import Lock
from types import CodeType
from urllib.parse import quote_from_bytes
import markupsafe
if t.TYPE_CHECKING:
import typing_extensions as te
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
# special singleton representing missing values for the runtime
missing: t.Any = type("MissingType", (), {"__repr__": lambda x: "missing"})()
internal_code: t.MutableSet[CodeType] = set()
concat = "".join
def pass_context(f: F) -> F:
"""Pass the :class:`~jinja2.runtime.Context` as the first argument
to the decorated function when called while rendering a template.
Can be used on functions, filters, and tests.
If only ``Context.eval_context`` is needed, use
:func:`pass_eval_context`. If only ``Context.environment`` is
needed, use :func:`pass_environment`.
.. versionadded:: 3.0.0
Replaces ``contextfunction`` and ``contextfilter``.
"""
f.jinja_pass_arg = _PassArg.context # type: ignore
return f
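# A minimal sketch of a context-aware filter (illustrative, assuming a
# default Environment; "greet" and "prefix" are made-up names):
#
# >>> from jinja2 import Environment, pass_context
# >>> @pass_context
# ... def greet(ctx, name):
# ...     return f"{ctx['prefix']} {name}"
# >>> env = Environment()
# >>> env.filters["greet"] = greet
# >>> env.from_string("{{ 'World'|greet }}").render(prefix="Hello")
# 'Hello World'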
def pass_eval_context(f: F) -> F:
"""Pass the :class:`~jinja2.nodes.EvalContext` as the first argument
to the decorated function when called while rendering a template.
See :ref:`eval-context`.
Can be used on functions, filters, and tests.
If only ``EvalContext.environment`` is needed, use
:func:`pass_environment`.
.. versionadded:: 3.0.0
Replaces ``evalcontextfunction`` and ``evalcontextfilter``.
"""
f.jinja_pass_arg = _PassArg.eval_context # type: ignore
return f
def pass_environment(f: F) -> F:
"""Pass the :class:`~jinja2.Environment` as the first argument to
the decorated function when called while rendering a template.
Can be used on functions, filters, and tests.
.. versionadded:: 3.0.0
Replaces ``environmentfunction`` and ``environmentfilter``.
"""
f.jinja_pass_arg = _PassArg.environment # type: ignore
return f
class _PassArg(enum.Enum):
context = enum.auto()
eval_context = enum.auto()
environment = enum.auto()
@classmethod
def from_obj(cls, obj: F) -> t.Optional["_PassArg"]:
if hasattr(obj, "jinja_pass_arg"):
return obj.jinja_pass_arg # type: ignore
return None
def internalcode(f: F) -> F:
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj: t.Any) -> bool:
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from .runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable: t.Iterable[t.Any]) -> None:
"""Consumes an iterable without doing anything with it."""
for _ in iterable:
pass
def clear_caches() -> None:
"""Jinja keeps internal caches for environments and lexers. These are
used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
get_spontaneous_environment.cache_clear()
_lexer_cache.clear()
def import_string(import_name: str, silent: bool = False) -> t.Any:
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ":" in import_name:
module, obj = import_name.split(":", 1)
elif "." in import_name:
module, _, obj = import_name.rpartition(".")
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
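# A minimal sketch (illustrative, assuming the standard library is
# importable; the output shown is for POSIX path separators):
#
# >>> from jinja2.utils import import_string
# >>> join = import_string("os.path:join")
# >>> join("a", "b")
# 'a/b'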
def open_if_exists(filename: str, mode: str = "rb") -> t.Optional[t.IO[t.Any]]:
"""Returns a file descriptor for the filename if that file exists,
otherwise ``None``.
"""
if not os.path.isfile(filename):
return None
return open(filename, mode)
def object_type_repr(obj: t.Any) -> str:
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return "None"
elif obj is Ellipsis:
return "Ellipsis"
cls = type(obj)
if cls.__module__ == "builtins":
return f"{cls.__name__} object"
return f"{cls.__module__}.{cls.__name__} object"
def pformat(obj: t.Any) -> str:
"""Format an object using :func:`pprint.pformat`."""
from pprint import pformat
return pformat(obj)
_http_re = re.compile(
r"""
^
(
(https?://|www\.) # scheme or www
(([\w%-]+\.)+)? # subdomain
(
[a-z]{2,63} # basic tld
|
xn--[\w%]{2,59} # idna tld
)
|
([\w%-]{2,63}\.)+ # basic domain
(com|net|int|edu|gov|org|info|mil) # basic tld
|
(https?://) # scheme
(
(([\d]{1,3})(\.[\d]{1,3}){3}) # IPv4
|
(\[([\da-f]{0,4}:){2}([\da-f]{0,4}:?){1,6}]) # IPv6
)
)
(?::[\d]{1,5})? # port
(?:[/?#]\S*)? # path, query, and fragment
$
""",
re.IGNORECASE | re.VERBOSE,
)
_email_re = re.compile(r"^\S+@\w[\w.-]*\.\w+$")
def urlize(
text: str,
trim_url_limit: t.Optional[int] = None,
rel: t.Optional[str] = None,
target: t.Optional[str] = None,
extra_schemes: t.Optional[t.Iterable[str]] = None,
) -> str:
"""Convert URLs in text into clickable links.
This may not recognize links in some situations. Usually, a more
comprehensive formatter, such as a Markdown library, is a better
choice.
Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
addresses. Links with trailing punctuation (periods, commas, closing
parentheses) and leading punctuation (opening parentheses) are
recognized excluding the punctuation. Email addresses that include
header fields are not recognized (for example,
``mailto:address@example.com?cc=copy@example.com``).
:param text: Original text containing URLs to link.
:param trim_url_limit: Shorten displayed URL values to this length.
:param target: Add the ``target`` attribute to links.
:param rel: Add the ``rel`` attribute to links.
:param extra_schemes: Recognize URLs that start with these schemes
in addition to the default behavior.
.. versionchanged:: 3.0
The ``extra_schemes`` parameter was added.
.. versionchanged:: 3.0
Generate ``https://`` links for URLs without a scheme.
.. versionchanged:: 3.0
The parsing rules were updated. Recognize email addresses with
or without the ``mailto:`` scheme. Validate IP addresses. Ignore
parentheses and brackets in more cases.
"""
if trim_url_limit is not None:
def trim_url(x: str) -> str:
if len(x) > trim_url_limit:
return f"{x[:trim_url_limit]}..."
return x
else:
def trim_url(x: str) -> str:
return x
words = re.split(r"(\s+)", str(markupsafe.escape(text)))
rel_attr = f' rel="{markupsafe.escape(rel)}"' if rel else ""
target_attr = f' target="{markupsafe.escape(target)}"' if target else ""
for i, word in enumerate(words):
head, middle, tail = "", word, ""
        match = re.match(r"^([(<]|&lt;)+", middle)
if match:
head = match.group()
middle = middle[match.end() :]
# Unlike lead, which is anchored to the start of the string,
# need to check that the string ends with any of the characters
# before trying to match all of them, to avoid backtracking.
        if middle.endswith((")", ">", ".", ",", "\n", "&gt;")):
            match = re.search(r"([)>.,\n]|&gt;)+$", middle)
if match:
tail = match.group()
middle = middle[: match.start()]
# Prefer balancing parentheses in URLs instead of ignoring a
# trailing character.
        for start_char, end_char in ("(", ")"), ("<", ">"), ("&lt;", "&gt;"):
start_count = middle.count(start_char)
if start_count <= middle.count(end_char):
# Balanced, or lighter on the left
continue
# Move as many as possible from the tail to balance
for _ in range(min(start_count, tail.count(end_char))):
end_index = tail.index(end_char) + len(end_char)
# Move anything in the tail before the end char too
middle += tail[:end_index]
tail = tail[end_index:]
if _http_re.match(middle):
if middle.startswith("https://") or middle.startswith("http://"):
middle = (
f'<a href="{middle}"{rel_attr}{target_attr}>{trim_url(middle)}</a>'
)
else:
middle = (
f'<a href="https://{middle}"{rel_attr}{target_attr}>'
f"{trim_url(middle)}</a>"
)
elif middle.startswith("mailto:") and _email_re.match(middle[7:]):
middle = f'<a href="{middle}">{middle[7:]}</a>'
elif (
"@" in middle
and not middle.startswith("www.")
and ":" not in middle
and _email_re.match(middle)
):
middle = f'<a href="mailto:{middle}">{middle}</a>'
elif extra_schemes is not None:
for scheme in extra_schemes:
if middle != scheme and middle.startswith(scheme):
middle = f'<a href="{middle}"{rel_attr}{target_attr}>{middle}</a>'
words[i] = f"{head}{middle}{tail}"
return "".join(words)
def generate_lorem_ipsum(
n: int = 5, html: bool = True, min: int = 20, max: int = 100
) -> str:
"""Generate some lorem ipsum for the template."""
from .constants import LOREM_IPSUM_WORDS
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
        # each paragraph consists of 20 to 100 words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p_str = " ".join(p)
if p_str.endswith(","):
p_str = p_str[:-1] + "."
elif not p_str.endswith("."):
p_str += "."
result.append(p_str)
if not html:
return "\n\n".join(result)
return markupsafe.Markup(
"\n".join(f"<p>{markupsafe.escape(x)}</p>" for x in result)
)
def url_quote(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
"""Quote a string for use in a URL using the given charset.
:param obj: String or bytes to quote. Other types are converted to
string then encoded to bytes using the given charset.
:param charset: Encode text to bytes using this charset.
:param for_qs: Quote "/" and use "+" for spaces.
"""
if not isinstance(obj, bytes):
if not isinstance(obj, str):
obj = str(obj)
obj = obj.encode(charset)
safe = b"" if for_qs else b"/"
rv = quote_from_bytes(obj, safe)
if for_qs:
rv = rv.replace("%20", "+")
return rv
@abc.MutableMapping.register
class LRUCache:
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self) -> t.Mapping[str, t.Any]:
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
self.__dict__.update(d)
self._postinit()
def __getnewargs__(
self,
) -> t.Tuple[int,]:
return (self.capacity,)
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue.extend(self._queue)
return rv
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
try:
return self[key]
except KeyError:
self[key] = default
return default
def clear(self) -> None:
"""Clear the cache."""
with self._wlock:
self._mapping.clear()
self._queue.clear()
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self) -> int:
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self._mapping!r}>"
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
with self._wlock:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
with self._wlock:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
with self._wlock:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
pass
def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
return [x[1] for x in self.items()]
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def __iter__(self) -> t.Iterator[t.Any]:
return reversed(tuple(self._queue))
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
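# A minimal sketch of LRU eviction (illustrative, assuming a capacity of
# two):
#
# >>> from jinja2.utils import LRUCache
# >>> cache = LRUCache(2)
# >>> cache["a"] = 1
# >>> cache["b"] = 2
# >>> _ = cache["a"]   # touch "a" so "b" becomes least recently used
# >>> cache["c"] = 3   # evicts "b"
# >>> "b" in cache, "a" in cache
# (False, True)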
def select_autoescape(
enabled_extensions: t.Collection[str] = ("html", "htm", "xml"),
disabled_extensions: t.Collection[str] = (),
default_for_string: bool = True,
default: bool = False,
) -> t.Callable[[t.Optional[str]], bool]:
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
The `enabled_extensions` is an iterable of all the extensions that
autoescaping should be enabled for. Likewise `disabled_extensions` is
a list of all templates it should be disabled for. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case insensitive.
.. versionadded:: 2.9
"""
enabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in enabled_extensions)
disabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in disabled_extensions)
def autoescape(template_name: t.Optional[str]) -> bool:
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape
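# Hedged sketch (not part of the upstream module): the returned callable can
# also be probed directly; matching is case insensitive, and ``None`` stands
# for a template loaded from a string.
#
#   >>> autoescape = select_autoescape(enabled_extensions=("html",))
#   >>> autoescape("index.HTML"), autoescape("notes.txt"), autoescape(None)
#   (True, False, True)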
def htmlsafe_json_dumps(
obj: t.Any, dumps: t.Optional[t.Callable[..., str]] = None, **kwargs: t.Any
) -> markupsafe.Markup:
"""Serialize an object to a string of JSON with :func:`json.dumps`,
then replace HTML-unsafe characters with Unicode escapes and mark
the result safe with :class:`~markupsafe.Markup`.
This is available in templates as the ``|tojson`` filter.
The following characters are escaped: ``<``, ``>``, ``&``, ``'``.
The returned string is safe to render in HTML documents and
``<script>`` tags. The exception is in HTML attributes that are
double quoted; either use single quotes or the ``|forceescape``
filter.
:param obj: The object to serialize to JSON.
:param dumps: The ``dumps`` function to use. Defaults to
``env.policies["json.dumps_function"]``, which defaults to
:func:`json.dumps`.
:param kwargs: Extra arguments to pass to ``dumps``. Merged onto
``env.policies["json.dumps_kwargs"]``.
.. versionchanged:: 3.0
The ``dumper`` parameter is renamed to ``dumps``.
.. versionadded:: 2.9
"""
if dumps is None:
dumps = json.dumps
return markupsafe.Markup(
dumps(obj, **kwargs)
.replace("<", "\\u003c")
.replace(">", "\\u003e")
.replace("&", "\\u0026")
.replace("'", "\\u0027")
)
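# Hedged sketch (not part of the upstream module): shows how the HTML-unsafe
# characters listed in the docstring are replaced with Unicode escapes.
#
#   >>> print(htmlsafe_json_dumps({"msg": "<script>"}))
#   {"msg": "\u003cscript\u003e"}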
class Cycler:
"""Cycle through values by yield them one at a time, then restarting
once the end is reached. Available as ``cycler`` in templates.
Similar to ``loop.cycle``, but can be used outside loops or across
multiple loops. For example, render a list of folders and files in a
    list, alternately giving them "odd" and "even" classes.
.. code-block:: html+jinja
{% set row_class = cycler("odd", "even") %}
<ul class="browser">
{% for folder in folders %}
<li class="folder {{ row_class.next() }}">{{ folder }}
{% endfor %}
{% for file in files %}
<li class="file {{ row_class.next() }}">{{ file }}
{% endfor %}
</ul>
:param items: Each positional argument will be yielded in the order
given for each cycle.
.. versionadded:: 2.1
"""
def __init__(self, *items: t.Any) -> None:
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.pos = 0
def reset(self) -> None:
"""Resets the current item to the first item."""
self.pos = 0
@property
def current(self) -> t.Any:
"""Return the current item. Equivalent to the item that will be
returned next time :meth:`next` is called.
"""
return self.items[self.pos]
def next(self) -> t.Any:
"""Return the current item, then advance :attr:`current` to the
next item.
"""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
__next__ = next
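# Hedged sketch (not part of the upstream module): using a Cycler outside a
# template, as the docstring above suggests is possible.
#
#   >>> row_class = Cycler("odd", "even")
#   >>> [row_class.next() for _ in range(3)]
#   ['odd', 'even', 'odd']
#   >>> row_class.current    # the value the next call will return
#   'even'
#   >>> row_class.reset()
#   >>> row_class.current
#   'odd'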
class Joiner:
"""A joining helper for templates."""
def __init__(self, sep: str = ", ") -> None:
self.sep = sep
self.used = False
def __call__(self) -> str:
if not self.used:
self.used = True
return ""
return self.sep
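# Hedged sketch (not part of the upstream module): the first call returns an
# empty string, every later call returns the separator.
#
#   >>> comma = Joiner(", ")
#   >>> "".join(f"{comma()}{item}" for item in ["a", "b", "c"])
#   'a, b, c'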
class Namespace:
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name: str) -> t.Any:
# __class__ is needed for the awaitable check in async mode
if name in {"_Namespace__attrs", "__class__"}:
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
except KeyError:
raise AttributeError(name) from None
def __setitem__(self, name: str, value: t.Any) -> None:
self.__attrs[name] = value
def __repr__(self) -> str:
return f"<Namespace {self.__attrs!r}>"
| 23,961 | 30.612137 | 87 | py |
jinja | jinja-main/src/jinja2/sandbox.py | """A sandbox layer that ensures unsafe operations cannot be performed.
Useful when the template itself comes from an untrusted source.
"""
import operator
import types
import typing as t
from _string import formatter_field_name_split # type: ignore
from collections import abc
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
from .runtime import Context
from .runtime import Undefined
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
_mutable_spec: t.Tuple[t.Tuple[t.Type[t.Any], t.FrozenSet[str]], ...] = (
(
abc.MutableSet,
frozenset(
[
"add",
"clear",
"difference_update",
"discard",
"pop",
"remove",
"symmetric_difference_update",
"update",
]
),
),
(
abc.MutableMapping,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
abc.MutableSequence,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
deque,
frozenset(
[
"append",
"appendleft",
"clear",
"extend",
"extendleft",
"pop",
"popleft",
"remove",
"rotate",
]
),
),
)
def inspect_format_method(callable: t.Callable[..., t.Any]) -> t.Optional[str]:
if not isinstance(
callable, (types.MethodType, types.BuiltinMethodType)
) or callable.__name__ not in ("format", "format_map"):
return None
obj = callable.__self__
if isinstance(obj, str):
return obj
return None
def safe_range(*args: int) -> range:
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range(*args)
if len(rng) > MAX_RANGE:
raise OverflowError(
"Range too big. The sandbox blocks ranges larger than"
f" MAX_RANGE ({MAX_RANGE})."
)
return rng
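# Hedged sketch (not part of the upstream module): safe_range behaves like
# range until the MAX_RANGE limit is exceeded.
#
#   >>> list(safe_range(3))
#   [0, 1, 2]
#   >>> safe_range(MAX_RANGE + 1)
#   Traceback (most recent call last):
#     ...
#   OverflowError: Range too big. The sandbox blocks ranges larger than MAX_RANGE (100000).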
def unsafe(f: F) -> F:
"""Marks a function or method as unsafe.
.. code-block: python
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True # type: ignore
return f
def is_internal_attribute(obj: t.Any, attr: str) -> bool:
"""Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the ``__code__`` attribute of
    Python function objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == "mro":
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
elif hasattr(types, "AsyncGeneratorType") and isinstance(
obj, types.AsyncGeneratorType
):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
return attr.startswith("__")
def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) or the corresponding ABCs would modify it
if called.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object, ``False`` is returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
    raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"**": operator.pow,
"%": operator.mod,
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
"+": operator.pos,
"-": operator.neg,
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
    #: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops: t.FrozenSet[str] = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
    #: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops: t.FrozenSet[str] = frozenset()
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
super().__init__(*args, **kwargs)
self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith("_") or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj: t.Any) -> bool:
"""Check if an object is safely callable. By default callables
are considered safe unless decorated with :func:`unsafe`.
This also recognizes the Django convention of setting
``func.alters_data = True``.
"""
return not (
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
)
def call_binop(
self, context: Context, operator: str, left: t.Any, right: t.Any
) -> t.Any:
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(
self, obj: t.Any, argument: t.Union[str, t.Any]
) -> t.Union[t.Any, Undefined]:
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, str):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
"""Subscribe an object from sandboxed code and prefer the
        attribute. The attribute passed *must* be a string.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
"""Return an undefined object for unsafe attributes."""
return self.undefined(
f"access to attribute {attribute!r} of"
f" {type(obj).__name__!r} object is unsafe.",
name=attribute,
obj=obj,
exc=SecurityError,
)
def format_string(
self,
s: str,
args: t.Tuple[t.Any, ...],
kwargs: t.Dict[str, t.Any],
format_func: t.Optional[t.Callable[..., t.Any]] = None,
) -> str:
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
formatter: SandboxedFormatter
if isinstance(s, Markup):
formatter = SandboxedEscapeFormatter(self, escape=s.escape)
else:
formatter = SandboxedFormatter(self)
if format_func is not None and format_func.__name__ == "format_map":
if len(args) != 1 or kwargs:
raise TypeError(
"format_map() takes exactly one argument"
f" {len(args) + (kwargs is not None)} given"
)
kwargs = args[0]
args = ()
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
def call(
__self, # noqa: B902
__context: Context,
__obj: t.Any,
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
return __self.format_string(fmt, args, kwargs, __obj)
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError(f"{__obj!r} is not safely callable")
return __context.call(__obj, *args, **kwargs)
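# Hedged sketch (not part of the upstream module): a subclass that intercepts
# ``**`` via ``intercepted_binops`` and ``call_binop``, as described above.
# The exponent limit of 100 is an illustrative choice, not an upstream value.
#
#   class LimitedPowSandbox(SandboxedEnvironment):
#       intercepted_binops = frozenset(["**"])
#
#       def call_binop(self, context, operator, left, right):
#           if operator == "**" and right > 100:
#               raise SecurityError("exponent too large")
#           return super().call_binop(context, operator, left, right)
#
#   env = LimitedPowSandbox()
#   env.from_string("{{ 2 ** 4 }}").render()      # -> '16'
#   env.from_string("{{ 2 ** 10000 }}").render()  # -> raises SecurityError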
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
if not super().is_safe_attribute(obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
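# Hedged usage sketch (not part of the upstream module): the immutable sandbox
# turns access to mutating methods such as ``list.append`` into SecurityError.
#
#   env = ImmutableSandboxedEnvironment()
#   env.from_string("{{ items | join(',') }}").render(items=[1, 2])  # fine
#   env.from_string("{{ items.append(3) }}").render(items=[1, 2])
#   # -> raises SecurityError: access to attribute 'append' of 'list'
#   #    object is unsafe.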
class SandboxedFormatter(Formatter):
def __init__(self, env: Environment, **kwargs: t.Any) -> None:
self._env = env
super().__init__(**kwargs)
def get_field(
self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
) -> t.Tuple[t.Any, str]:
first, rest = formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
for is_attr, i in rest:
if is_attr:
obj = self._env.getattr(obj, i)
else:
obj = self._env.getitem(obj, i)
return obj, first
class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
pass
| 14,615 | 33.06993 | 88 | py |
jinja | jinja-main/src/jinja2/_identifier.py | import re
# generated by scripts/generate_identifier_pattern.py
pattern = re.compile(
r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
)
| 805 | 114.142857 | 715 | py |
jinja | jinja-main/src/jinja2/runtime.py | """The runtime functions and state used by compiled templates."""
import functools
import sys
import typing as t
from collections import abc
from itertools import chain
from markupsafe import escape # noqa: F401
from markupsafe import Markup
from markupsafe import soft_str
from .async_utils import auto_aiter
from .async_utils import auto_await # noqa: F401
from .exceptions import TemplateNotFound # noqa: F401
from .exceptions import TemplateRuntimeError # noqa: F401
from .exceptions import UndefinedError
from .nodes import EvalContext
from .utils import _PassArg
from .utils import concat
from .utils import internalcode
from .utils import missing
from .utils import Namespace # noqa: F401
from .utils import object_type_repr
from .utils import pass_eval_context
V = t.TypeVar("V")
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
if t.TYPE_CHECKING:
import logging
import typing_extensions as te
from .environment import Environment
class LoopRenderFunc(te.Protocol):
def __call__(
self,
reciter: t.Iterable[V],
loop_render_func: "LoopRenderFunc",
depth: int = 0,
) -> str:
...
# these variables are exported to the template runtime
exported = [
"LoopContext",
"TemplateReference",
"Macro",
"Markup",
"TemplateRuntimeError",
"missing",
"escape",
"markup_join",
"str_join",
"identity",
"TemplateNotFound",
"Namespace",
"Undefined",
"internalcode",
]
async_exported = [
"AsyncLoopContext",
"auto_aiter",
"auto_await",
]
def identity(x: V) -> V:
"""Returns its argument. Useful for certain things in the
environment.
"""
return x
def markup_join(seq: t.Iterable[t.Any]) -> str:
"""Concatenation that escapes if necessary and converts to string."""
buf = []
iterator = map(soft_str, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, "__html__"):
return Markup("").join(chain(buf, iterator))
return concat(buf)
def str_join(seq: t.Iterable[t.Any]) -> str:
"""Simple args to string conversion and concatenation."""
return concat(map(str, seq))
def new_context(
environment: "Environment",
template_name: t.Optional[str],
blocks: t.Dict[str, t.Callable[["Context"], t.Iterator[str]]],
vars: t.Optional[t.Dict[str, t.Any]] = None,
shared: bool = False,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
locals: t.Optional[t.Mapping[str, t.Any]] = None,
) -> "Context":
"""Internal helper for context creation."""
if vars is None:
vars = {}
if shared:
parent = vars
else:
parent = dict(globals or (), **vars)
if locals:
# if the parent is shared a copy should be created because
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
for key, value in locals.items():
if value is not missing:
parent[key] = value
return environment.context_class(
environment, parent, template_name, blocks, globals=globals
)
class TemplateReference:
"""The `self` in templates."""
def __init__(self, context: "Context") -> None:
self.__context = context
def __getitem__(self, name: str) -> t.Any:
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.__context.name!r}>"
def _dict_method_all(dict_method: F) -> F:
@functools.wraps(dict_method)
def f_all(self: "Context") -> t.Any:
return dict_method(self.get_all())
return t.cast(F, f_all)
@abc.Mapping.register
class Context:
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful as it's created
automatically at various stages of the template evaluation and should not
be created by hand.
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
:func:`pass_context` get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
def __init__(
self,
environment: "Environment",
parent: t.Dict[str, t.Any],
name: t.Optional[str],
blocks: t.Dict[str, t.Callable[["Context"], t.Iterator[str]]],
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
):
self.parent = parent
self.vars: t.Dict[str, t.Any] = {}
self.environment: "Environment" = environment
self.eval_ctx = EvalContext(self.environment, name)
self.exported_vars: t.Set[str] = set()
self.name = name
self.globals_keys = set() if globals is None else set(globals)
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
self.blocks = {k: [v] for k, v in blocks.items()}
def super(
self, name: str, current: t.Callable[["Context"], t.Iterator[str]]
) -> t.Union["BlockReference", "Undefined"]:
"""Render a parent block."""
try:
blocks = self.blocks[name]
index = blocks.index(current) + 1
blocks[index]
except LookupError:
return self.environment.undefined(
f"there is no parent block called {name!r}.", name="super"
)
return BlockReference(name, self, blocks, index)
def get(self, key: str, default: t.Any = None) -> t.Any:
"""Look up a variable by name, or return a default if the key is
not found.
:param key: The variable name to look up.
:param default: The value to return if the key is not found.
"""
try:
return self[key]
except KeyError:
return default
def resolve(self, key: str) -> t.Union[t.Any, "Undefined"]:
"""Look up a variable by name, or return an :class:`Undefined`
object if the key is not found.
If you need to add custom behavior, override
:meth:`resolve_or_missing`, not this method. The various lookup
functions use that method, not this one.
:param key: The variable name to look up.
"""
rv = self.resolve_or_missing(key)
if rv is missing:
return self.environment.undefined(name=key)
return rv
def resolve_or_missing(self, key: str) -> t.Any:
"""Look up a variable by name, or return a ``missing`` sentinel
if the key is not found.
Override this method to add custom lookup behavior.
:meth:`resolve`, :meth:`get`, and :meth:`__getitem__` use this
method. Don't call this method directly.
:param key: The variable name to look up.
"""
if key in self.vars:
return self.vars[key]
if key in self.parent:
return self.parent[key]
return missing
def get_exported(self) -> t.Dict[str, t.Any]:
"""Get a new dict with the exported variables."""
return {k: self.vars[k] for k in self.exported_vars}
def get_all(self) -> t.Dict[str, t.Any]:
"""Return the complete context as dict including the exported
        variables. For optimization reasons this might not return an
        actual copy, so be careful when using it.
"""
if not self.vars:
return self.parent
if not self.parent:
return self.vars
return dict(self.parent, **self.vars)
@internalcode
def call(
__self, # noqa: B902
__obj: t.Callable[..., t.Any],
*args: t.Any,
**kwargs: t.Any,
) -> t.Union[t.Any, "Undefined"]:
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable has :func:`pass_context` or
:func:`pass_environment`.
"""
if __debug__:
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
if (
hasattr(__obj, "__call__") # noqa: B004
and _PassArg.from_obj(__obj.__call__) is not None
):
__obj = __obj.__call__
pass_arg = _PassArg.from_obj(__obj)
if pass_arg is _PassArg.context:
# the active context should have access to variables set in
# loops and blocks without mutating the context itself
if kwargs.get("_loop_vars"):
__self = __self.derived(kwargs["_loop_vars"])
if kwargs.get("_block_vars"):
__self = __self.derived(kwargs["_block_vars"])
args = (__self,) + args
elif pass_arg is _PassArg.eval_context:
args = (__self.eval_ctx,) + args
elif pass_arg is _PassArg.environment:
args = (__self.environment,) + args
kwargs.pop("_block_vars", None)
kwargs.pop("_loop_vars", None)
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined(
"value was undefined because a callable raised a"
" StopIteration exception"
)
def derived(self, locals: t.Optional[t.Dict[str, t.Any]] = None) -> "Context":
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
"""
context = new_context(
self.environment, self.name, {}, self.get_all(), True, None, locals
)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in self.blocks.items())
return context
keys = _dict_method_all(dict.keys)
values = _dict_method_all(dict.values)
items = _dict_method_all(dict.items)
def __contains__(self, name: str) -> bool:
return name in self.vars or name in self.parent
def __getitem__(self, key: str) -> t.Any:
"""Look up a variable by name with ``[]`` syntax, or raise a
``KeyError`` if the key is not found.
"""
item = self.resolve_or_missing(key)
if item is missing:
raise KeyError(key)
return item
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.get_all()!r} of {self.name!r}>"
class BlockReference:
"""One block on a template reference."""
def __init__(
self,
name: str,
context: "Context",
stack: t.List[t.Callable[["Context"], t.Iterator[str]]],
depth: int,
) -> None:
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
def super(self) -> t.Union["BlockReference", "Undefined"]:
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment.undefined(
f"there is no parent block called {self.name!r}.", name="super"
)
return BlockReference(self.name, self._context, self._stack, self._depth + 1)
@internalcode
async def _async_call(self) -> str:
rv = concat(
[x async for x in self._stack[self._depth](self._context)] # type: ignore
)
if self._context.eval_ctx.autoescape:
return Markup(rv)
return rv
@internalcode
def __call__(self) -> str:
if self._context.environment.is_async:
return self._async_call() # type: ignore
rv = concat(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
return Markup(rv)
return rv
class LoopContext:
"""A wrapper iterable for dynamic ``for`` loops, with information
about the loop and iteration.
"""
#: Current iteration of the loop, starting at 0.
index0 = -1
_length: t.Optional[int] = None
_after: t.Any = missing
_current: t.Any = missing
_before: t.Any = missing
_last_changed_value: t.Any = missing
def __init__(
self,
iterable: t.Iterable[V],
undefined: t.Type["Undefined"],
recurse: t.Optional["LoopRenderFunc"] = None,
depth0: int = 0,
) -> None:
"""
:param iterable: Iterable to wrap.
:param undefined: :class:`Undefined` class to use for next and
previous items.
:param recurse: The function to render the loop body when the
loop is marked recursive.
:param depth0: Incremented when looping recursively.
"""
self._iterable = iterable
self._iterator = self._to_iterator(iterable)
self._undefined = undefined
self._recurse = recurse
#: How many levels deep a recursive loop currently is, starting at 0.
self.depth0 = depth0
@staticmethod
def _to_iterator(iterable: t.Iterable[V]) -> t.Iterator[V]:
return iter(iterable)
@property
def length(self) -> int:
"""Length of the iterable.
If the iterable is a generator or otherwise does not have a
size, it is eagerly evaluated to get a size.
"""
if self._length is not None:
return self._length
try:
self._length = len(self._iterable) # type: ignore
except TypeError:
iterable = list(self._iterator)
self._iterator = self._to_iterator(iterable)
self._length = len(iterable) + self.index + (self._after is not missing)
return self._length
def __len__(self) -> int:
return self.length
@property
def depth(self) -> int:
"""How many levels deep a recursive loop currently is, starting at 1."""
return self.depth0 + 1
@property
def index(self) -> int:
"""Current iteration of the loop, starting at 1."""
return self.index0 + 1
@property
def revindex0(self) -> int:
"""Number of iterations from the end of the loop, ending at 0.
Requires calculating :attr:`length`.
"""
return self.length - self.index
@property
def revindex(self) -> int:
"""Number of iterations from the end of the loop, ending at 1.
Requires calculating :attr:`length`.
"""
return self.length - self.index0
@property
def first(self) -> bool:
"""Whether this is the first iteration of the loop."""
return self.index0 == 0
def _peek_next(self) -> t.Any:
"""Return the next element in the iterable, or :data:`missing`
if the iterable is exhausted. Only peeks one item ahead, caching
        the result in :attr:`_after` for use in subsequent checks. The
cache is reset when :meth:`__next__` is called.
"""
if self._after is not missing:
return self._after
self._after = next(self._iterator, missing)
return self._after
@property
def last(self) -> bool:
"""Whether this is the last iteration of the loop.
Causes the iterable to advance early. See
:func:`itertools.groupby` for issues this can cause.
The :func:`groupby` filter avoids that issue.
"""
return self._peek_next() is missing
@property
def previtem(self) -> t.Union[t.Any, "Undefined"]:
"""The item in the previous iteration. Undefined during the
first iteration.
"""
if self.first:
return self._undefined("there is no previous item")
return self._before
@property
def nextitem(self) -> t.Union[t.Any, "Undefined"]:
"""The item in the next iteration. Undefined during the last
iteration.
Causes the iterable to advance early. See
:func:`itertools.groupby` for issues this can cause.
The :func:`jinja-filters.groupby` filter avoids that issue.
"""
rv = self._peek_next()
if rv is missing:
return self._undefined("there is no next item")
return rv
def cycle(self, *args: V) -> V:
"""Return a value from the given args, cycling through based on
the current :attr:`index0`.
:param args: One or more values to cycle through.
"""
if not args:
raise TypeError("no items for cycling given")
return args[self.index0 % len(args)]
def changed(self, *value: t.Any) -> bool:
"""Return ``True`` if previously called with a different value
(including when called for the first time).
:param value: One or more values to compare to the last call.
"""
if self._last_changed_value != value:
self._last_changed_value = value
return True
return False
def __iter__(self) -> "LoopContext":
return self
def __next__(self) -> t.Tuple[t.Any, "LoopContext"]:
if self._after is not missing:
rv = self._after
self._after = missing
else:
rv = next(self._iterator)
self.index0 += 1
self._before = self._current
self._current = rv
return rv, self
@internalcode
def __call__(self, iterable: t.Iterable[V]) -> str:
"""When iterating over nested data, render the body of the loop
recursively with the given inner iterable data.
The loop must have the ``recursive`` marker for this to work.
"""
if self._recurse is None:
raise TypeError(
"The loop must have the 'recursive' marker to be called recursively."
)
return self._recurse(iterable, self._recurse, depth=self.depth)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.index}/{self.length}>"
class AsyncLoopContext(LoopContext):
_iterator: t.AsyncIterator[t.Any] # type: ignore
@staticmethod
def _to_iterator( # type: ignore
iterable: t.Union[t.Iterable[V], t.AsyncIterable[V]]
) -> t.AsyncIterator[V]:
return auto_aiter(iterable)
@property
async def length(self) -> int: # type: ignore
if self._length is not None:
return self._length
try:
self._length = len(self._iterable) # type: ignore
except TypeError:
iterable = [x async for x in self._iterator]
self._iterator = self._to_iterator(iterable)
self._length = len(iterable) + self.index + (self._after is not missing)
return self._length
@property
async def revindex0(self) -> int: # type: ignore
return await self.length - self.index
@property
async def revindex(self) -> int: # type: ignore
return await self.length - self.index0
async def _peek_next(self) -> t.Any:
if self._after is not missing:
return self._after
try:
self._after = await self._iterator.__anext__()
except StopAsyncIteration:
self._after = missing
return self._after
@property
async def last(self) -> bool: # type: ignore
return await self._peek_next() is missing
@property
async def nextitem(self) -> t.Union[t.Any, "Undefined"]:
rv = await self._peek_next()
if rv is missing:
return self._undefined("there is no next item")
return rv
def __aiter__(self) -> "AsyncLoopContext":
return self
async def __anext__(self) -> t.Tuple[t.Any, "AsyncLoopContext"]:
if self._after is not missing:
rv = self._after
self._after = missing
else:
rv = await self._iterator.__anext__()
self.index0 += 1
self._before = self._current
self._current = rv
return rv, self
class Macro:
"""Wraps a macro function."""
def __init__(
self,
environment: "Environment",
func: t.Callable[..., str],
name: str,
arguments: t.List[str],
catch_kwargs: bool,
catch_varargs: bool,
caller: bool,
default_autoescape: t.Optional[bool] = None,
):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
self.name = name
self.arguments = arguments
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
self.explicit_caller = "caller" in arguments
if default_autoescape is None:
if callable(environment.autoescape):
default_autoescape = environment.autoescape(None)
else:
default_autoescape = environment.autoescape
self._default_autoescape = default_autoescape
@internalcode
@pass_eval_context
def __call__(self, *args: t.Any, **kwargs: t.Any) -> str:
        # This requires a bit of explanation. In the past we used to
# decide largely based on compile-time information if a macro is
# safe or unsafe. While there was a volatile mode it was largely
# unused for deciding on escaping. This turns out to be
# problematic for macros because whether a macro is safe depends not
# on the escape mode when it was defined, but rather when it was used.
#
# Because however we export macros from the module system and
# there are historic callers that do not pass an eval context (and
# will continue to not pass one), we need to perform an instance
# check here.
#
# This is considered safe because an eval context is not a valid
# argument to callables otherwise anyway. Worst case here is
# that if no eval context is passed we fall back to the compile
# time autoescape flag.
if args and isinstance(args[0], EvalContext):
autoescape = args[0].autoescape
args = args[1:]
else:
autoescape = self._default_autoescape
# try to consume the positional arguments
arguments = list(args[: self._argument_count])
off = len(arguments)
# For information why this is necessary refer to the handling
# of caller in the `macro_body` handler in the compiler.
found_caller = False
# if the number of arguments consumed is not the number of
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
for name in self.arguments[len(arguments) :]:
try:
value = kwargs.pop(name)
except KeyError:
value = missing
if name == "caller":
found_caller = True
arguments.append(value)
else:
found_caller = self.explicit_caller
# it's important that the order of these arguments does not change
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller and not found_caller:
caller = kwargs.pop("caller", None)
if caller is None:
caller = self._environment.undefined("No caller defined", name="caller")
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
if "caller" in kwargs:
raise TypeError(
f"macro {self.name!r} was invoked with two values for the special"
" caller argument. This is most likely a bug."
)
raise TypeError(
f"macro {self.name!r} takes no keyword argument {next(iter(kwargs))!r}"
)
if self.catch_varargs:
arguments.append(args[self._argument_count :])
elif len(args) > self._argument_count:
raise TypeError(
f"macro {self.name!r} takes not more than"
f" {len(self.arguments)} argument(s)"
)
return self._invoke(arguments, autoescape)
async def _async_invoke(self, arguments: t.List[t.Any], autoescape: bool) -> str:
rv = await self._func(*arguments) # type: ignore
if autoescape:
return Markup(rv)
return rv # type: ignore
def _invoke(self, arguments: t.List[t.Any], autoescape: bool) -> str:
if self._environment.is_async:
return self._async_invoke(arguments, autoescape) # type: ignore
rv = self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
def __repr__(self) -> str:
name = "anonymous" if self.name is None else repr(self.name)
return f"<{type(self).__name__} {name}>"
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = (
"_undefined_hint",
"_undefined_obj",
"_undefined_name",
"_undefined_exception",
)
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@property
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
if self._undefined_hint:
return self._undefined_hint
if self._undefined_obj is missing:
return f"{self._undefined_name!r} is undefined"
if not isinstance(self._undefined_name, str):
return (
f"{object_type_repr(self._undefined_obj)} has no"
f" element {self._undefined_name!r}"
)
return (
f"{object_type_repr(self._undefined_obj)!r} has no"
f" attribute {self._undefined_name!r}"
)
@internalcode
def _fail_with_undefined_error(
self, *args: t.Any, **kwargs: t.Any
) -> "te.NoReturn":
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
raise self._undefined_exception(self._undefined_message)
@internalcode
def __getattr__(self, name: str) -> t.Any:
if name[:2] == "__":
raise AttributeError(name)
return self._fail_with_undefined_error()
__add__ = __radd__ = __sub__ = __rsub__ = _fail_with_undefined_error
__mul__ = __rmul__ = __div__ = __rdiv__ = _fail_with_undefined_error
__truediv__ = __rtruediv__ = _fail_with_undefined_error
__floordiv__ = __rfloordiv__ = _fail_with_undefined_error
__mod__ = __rmod__ = _fail_with_undefined_error
__pos__ = __neg__ = _fail_with_undefined_error
__call__ = __getitem__ = _fail_with_undefined_error
__lt__ = __le__ = __gt__ = __ge__ = _fail_with_undefined_error
__int__ = __float__ = __complex__ = _fail_with_undefined_error
__pow__ = __rpow__ = _fail_with_undefined_error
def __eq__(self, other: t.Any) -> bool:
return type(self) is type(other)
def __ne__(self, other: t.Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return id(type(self))
def __str__(self) -> str:
return ""
def __len__(self) -> int:
return 0
def __iter__(self) -> t.Iterator[t.Any]:
yield from ()
async def __aiter__(self) -> t.AsyncIterator[t.Any]:
for _ in ():
yield
def __bool__(self) -> bool:
return False
def __repr__(self) -> str:
return "Undefined"
def make_logging_undefined(
logger: t.Optional["logging.Logger"] = None, base: t.Type[Undefined] = Undefined
) -> t.Type[Undefined]:
"""Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given a default logger is created.
Example::
logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(
logger=logger,
base=Undefined
)
.. versionadded:: 2.8
:param logger: the logger to use. If not provided, a default logger
is created.
:param base: the base class to add logging functionality to. This
defaults to :class:`Undefined`.
"""
if logger is None:
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
def _log_message(undef: Undefined) -> None:
logger.warning("Template variable warning: %s", undef._undefined_message)
class LoggingUndefined(base): # type: ignore
__slots__ = ()
def _fail_with_undefined_error( # type: ignore
self, *args: t.Any, **kwargs: t.Any
) -> "te.NoReturn":
try:
super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as e:
logger.error("Template variable error: %s", e) # type: ignore
raise e
def __str__(self) -> str:
_log_message(self)
return super().__str__() # type: ignore
def __iter__(self) -> t.Iterator[t.Any]:
_log_message(self)
return super().__iter__() # type: ignore
def __bool__(self) -> bool:
_log_message(self)
return super().__bool__() # type: ignore
return LoggingUndefined
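# Hedged runnable sketch (not part of the upstream module): printing the
# logging undefined emits a warning through the logger, while failing
# operations are logged as errors before the exception is re-raised.
#
#   >>> import logging
#   >>> LoggingUndefined = make_logging_undefined(logging.getLogger("tpl"))
#   >>> foo = LoggingUndefined(name="foo")
#   >>> str(foo)   # logs "Template variable warning: 'foo' is undefined"
#   ''
#   >>> foo.bar    # logs the failure as an error, then raises
#   Traceback (most recent call last):
#     ...
#   jinja2.exceptions.UndefinedError: 'foo' is undefined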
class ChainableUndefined(Undefined):
"""An undefined that is chainable, where both ``__getattr__`` and
``__getitem__`` return itself rather than raising an
:exc:`UndefinedError`.
>>> foo = ChainableUndefined(name='foo')
>>> str(foo.bar['baz'])
''
>>> foo.bar['baz'] + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
.. versionadded:: 2.11.0
"""
__slots__ = ()
def __html__(self) -> str:
return str(self)
def __getattr__(self, _: str) -> "ChainableUndefined":
return self
__getitem__ = __getattr__ # type: ignore
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self) -> str:
if self._undefined_hint:
message = f"undefined value printed: {self._undefined_hint}"
elif self._undefined_obj is missing:
message = self._undefined_name # type: ignore
else:
message = (
f"no such element: {object_type_repr(self._undefined_obj)}"
f"[{self._undefined_name!r}]"
)
return f"{{{{ {message} }}}}"
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = Undefined._fail_with_undefined_error
__eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
__contains__ = Undefined._fail_with_undefined_error
# Remove slots attributes, after the metaclass is applied they are
# unneeded and contain wrong data for subclasses.
del (
Undefined.__slots__,
ChainableUndefined.__slots__,
DebugUndefined.__slots__,
StrictUndefined.__slots__,
)
| 33,443 | 30.700474 | 88 | py |
jinja | jinja-main/src/jinja2/compiler.py | """Compiles nodes from the parser into Python code."""
import typing as t
from contextlib import contextmanager
from functools import update_wrapper
from io import StringIO
from itertools import chain
from keyword import iskeyword as is_python_keyword
from markupsafe import escape
from markupsafe import Markup
from . import nodes
from .exceptions import TemplateAssertionError
from .idtracking import Symbols
from .idtracking import VAR_LOAD_ALIAS
from .idtracking import VAR_LOAD_PARAMETER
from .idtracking import VAR_LOAD_RESOLVE
from .idtracking import VAR_LOAD_UNDEFINED
from .nodes import EvalContext
from .optimizer import Optimizer
from .utils import _PassArg
from .utils import concat
from .visitor import NodeVisitor
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
operators = {
"eq": "==",
"ne": "!=",
"gt": ">",
"gteq": ">=",
"lt": "<",
"lteq": "<=",
"in": "in",
"notin": "not in",
}
def optimizeconst(f: F) -> F:
def new_func(
self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any
) -> t.Any:
# Only optimize if the frame is not volatile
if self.optimizer is not None and not frame.eval_ctx.volatile:
new_node = self.optimizer.visit(node, frame.eval_ctx)
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
return update_wrapper(t.cast(F, new_func), f)
def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]:
@optimizeconst
def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None:
if (
self.environment.sandboxed
and op in self.environment.intercepted_binops # type: ignore
):
self.write(f"environment.call_binop(context, {op!r}, ")
self.visit(node.left, frame)
self.write(", ")
self.visit(node.right, frame)
else:
self.write("(")
self.visit(node.left, frame)
self.write(f" {op} ")
self.visit(node.right, frame)
self.write(")")
return visitor
def _make_unop(
op: str,
) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]:
@optimizeconst
def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None:
if (
self.environment.sandboxed
and op in self.environment.intercepted_unops # type: ignore
):
self.write(f"environment.call_unop(context, {op!r}, ")
self.visit(node.node, frame)
else:
self.write("(" + op)
self.visit(node.node, frame)
self.write(")")
return visitor
def generate(
node: nodes.Template,
environment: "Environment",
name: t.Optional[str],
filename: t.Optional[str],
stream: t.Optional[t.TextIO] = None,
defer_init: bool = False,
optimized: bool = True,
) -> t.Optional[str]:
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError("Can't compile non template nodes")
generator = environment.code_generator_class(
environment, name, filename, stream, defer_init, optimized
)
generator.visit(node)
if stream is None:
return generator.stream.getvalue() # type: ignore
return None
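# Hedged sketch (not part of the upstream module): ``generate`` is usually
# reached through ``Environment.compile``, which with ``raw=True`` returns
# the generated Python source instead of a code object.
#
#   from jinja2 import Environment
#   env = Environment()
#   print(env.compile("Hello {{ name }}!", raw=True))
#   # -> a module with a root() generator that yields the rendered parts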
def has_safe_repr(value: t.Any) -> bool:
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if type(value) in {bool, int, float, complex, range, str, Markup}:
return True
if type(value) in {tuple, list, set, frozenset}:
return all(has_safe_repr(v) for v in value)
if type(value) is dict:
return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items())
return False
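# Hedged examples (not part of the upstream module): only literals built from
# the safe constant types above pass the check.
#
#   >>> has_safe_repr((1, "a", frozenset({2.0})))
#   True
#   >>> has_safe_repr(object())
#   False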
def find_undeclared(
nodes: t.Iterable[nodes.Node], names: t.Iterable[str]
) -> t.Set[str]:
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class MacroRef:
def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None:
self.node = node
self.accesses_caller = False
self.accesses_kwargs = False
self.accesses_varargs = False
class Frame:
"""Holds compile time information for us."""
def __init__(
self,
eval_ctx: EvalContext,
parent: t.Optional["Frame"] = None,
level: t.Optional[int] = None,
) -> None:
self.eval_ctx = eval_ctx
# the parent of this frame
self.parent = parent
if parent is None:
self.symbols = Symbols(level=level)
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = False
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer: t.Optional[str] = None
# the name of the block we're in, otherwise None.
self.block: t.Optional[str] = None
else:
self.symbols = Symbols(parent.symbols, level=level)
self.require_output_check = parent.require_output_check
self.buffer = parent.buffer
self.block = parent.block
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# variables set inside of loops and blocks should not affect outer frames,
        # but they still need to be tracked as part of the active context.
self.loop_frame = False
self.block_frame = False
# track whether the frame is being used in an if-statement or conditional
# expression as it determines which errors should be raised during runtime
# or compile time.
self.soft_frame = False
def copy(self) -> "Frame":
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.symbols = self.symbols.copy()
return rv
def inner(self, isolated: bool = False) -> "Frame":
"""Return an inner frame."""
if isolated:
return Frame(self.eval_ctx, level=self.symbols.level + 1)
return Frame(self.eval_ctx, self)
def soft(self) -> "Frame":
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
This is only used to implement if-statements and conditional
expressions.
"""
rv = self.copy()
rv.rootlevel = False
rv.soft_frame = True
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self) -> None:
self.filters: t.Set[str] = set()
self.tests: t.Set[str] = set()
def visit_Filter(self, node: nodes.Filter) -> None:
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node: nodes.Test) -> None:
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node: nodes.Block) -> None:
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names: t.Iterable[str]) -> None:
self.names = set(names)
self.undeclared: t.Set[str] = set()
def visit_Name(self, node: nodes.Name) -> None:
if node.ctx == "load" and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node: nodes.Block) -> None:
"""Stop visiting a blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(
self,
environment: "Environment",
name: t.Optional[str],
filename: t.Optional[str],
stream: t.Optional[t.TextIO] = None,
defer_init: bool = False,
optimized: bool = True,
) -> None:
if stream is None:
stream = StringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
self.optimizer: t.Optional[Optimizer] = None
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
self.import_aliases: t.Dict[str, str] = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks: t.Dict[str, nodes.Block] = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests: t.Dict[str, str] = {}
self.filters: t.Dict[str, str] = {}
# the debug information
self.debug_info: t.List[t.Tuple[int, int]] = []
self._write_debug_info: t.Optional[int] = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# Tracks toplevel assignments
self._assign_stack: t.List[t.Set[str]] = []
# Tracks parameter definition blocks
self._param_def_block: t.List[t.Set[str]] = []
# Tracks the current context.
self._context_reference_stack = ["context"]
@property
def optimized(self) -> bool:
return self.optimizer is not None
# -- Various compilation helpers
def fail(self, msg: str, lineno: int) -> "te.NoReturn":
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self) -> str:
"""Get a new unique identifier."""
self._last_identifier += 1
return f"t_{self._last_identifier}"
def buffer(self, frame: Frame) -> None:
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline(f"{frame.buffer} = []")
def return_buffer_contents(
self, frame: Frame, force_unescaped: bool = False
) -> None:
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline("if context.eval_ctx.autoescape:")
self.indent()
self.writeline(f"return Markup(concat({frame.buffer}))")
self.outdent()
self.writeline("else:")
self.indent()
self.writeline(f"return concat({frame.buffer})")
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline(f"return Markup(concat({frame.buffer}))")
return
self.writeline(f"return concat({frame.buffer})")
def indent(self) -> None:
"""Indent by one."""
self._indentation += 1
def outdent(self, step: int = 1) -> None:
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None:
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline("yield ", node)
else:
self.writeline(f"{frame.buffer}.append(", node)
def end_write(self, frame: Frame) -> None:
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(")")
def simple_write(
self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None
) -> None:
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None:
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
self.writeline("pass")
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x: str) -> None:
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write("\n" * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info, self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(" " * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(
self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0
) -> None:
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None:
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(
self,
node: t.Union[nodes.Call, nodes.Filter, nodes.Test],
frame: Frame,
extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
) -> None:
"""Writes a function call to the stream for the current node.
        A leading comma is added automatically. The extra keyword
        arguments must not include Python keywords, otherwise a syntax
        error could occur. The extra keyword arguments should be given
        as a Python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = any(
is_python_keyword(t.cast(str, k))
for k in chain((x.key for x in node.kwargs), extra_kwargs or ())
)
for arg in node.args:
self.write(", ")
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(", ")
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in extra_kwargs.items():
self.write(f", {key}={value}")
if node.dyn_args:
self.write(", *")
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(", **dict({")
else:
self.write(", **{")
for kwarg in node.kwargs:
self.write(f"{kwarg.key!r}: ")
self.visit(kwarg.value, frame)
self.write(", ")
if extra_kwargs is not None:
for key, value in extra_kwargs.items():
self.write(f"{key!r}: {value}, ")
if node.dyn_kwargs is not None:
self.write("}, **")
self.visit(node.dyn_kwargs, frame)
self.write(")")
else:
self.write("}")
elif node.dyn_kwargs is not None:
self.write(", **")
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None:
"""Find all filter and test names used in the template and
assign them to variables in the compiled namespace. Checking
that the names are registered with the environment is done when
compiling the Filter and Test nodes. If the node is in an If or
CondExpr node, the check is done at runtime instead.
.. versionchanged:: 3.0
Filters and tests in If and CondExpr nodes are checked at
runtime instead of compile time.
"""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for id_map, names, dependency in (self.filters, visitor.filters, "filters"), (
self.tests,
visitor.tests,
"tests",
):
for name in sorted(names):
if name not in id_map:
id_map[name] = self.temporary_identifier()
                # add a runtime check that dependencies used inside executed
                # blocks are defined, as this step may be skipped at compile
                # time
self.writeline("try:")
self.indent()
self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]")
self.outdent()
self.writeline("except KeyError:")
self.indent()
self.writeline("@internalcode")
self.writeline(f"def {id_map[name]}(*unused):")
self.indent()
self.writeline(
f'raise TemplateRuntimeError("No {dependency[:-1]}'
f' named {name!r} found.")'
)
self.outdent()
self.outdent()
def enter_frame(self, frame: Frame) -> None:
undefs = []
for target, (action, param) in frame.symbols.loads.items():
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
self.writeline(f"{target} = {self.get_resolve_func()}({param!r})")
elif action == VAR_LOAD_ALIAS:
self.writeline(f"{target} = {param}")
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
raise NotImplementedError("unknown load instruction")
if undefs:
self.writeline(f"{' = '.join(undefs)} = missing")
def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None:
if not with_python_scope:
undefs = []
for target in frame.symbols.loads:
undefs.append(target)
if undefs:
self.writeline(f"{' = '.join(undefs)} = missing")
def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str:
return async_value if self.environment.is_async else sync_value
def func(self, name: str) -> str:
return f"{self.choose_async()}def {name}"
def macro_body(
self, node: t.Union[nodes.Macro, nodes.CallBlock], frame: Frame
) -> t.Tuple[Frame, MacroRef]:
"""Dump the function def of a macro or call block."""
frame = frame.inner()
frame.symbols.analyze_node(node)
macro_ref = MacroRef(node)
explicit_caller = None
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
if arg.name == "caller":
explicit_caller = idx
if arg.name in ("kwargs", "varargs"):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
if "caller" in undeclared:
            # In older Jinja versions there was a bug that allowed caller
            # to retain the special behavior even if it was mentioned in
            # the argument list. However, this only really worked if it was
            # the last argument. So we explicitly check for this now and
            # error out if it appears anywhere else in the argument list.
if explicit_caller is not None:
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
self.fail(
"When defining macros or call blocks the "
'special "caller" argument must be omitted '
"or be given a default.",
node.lineno,
)
else:
args.append(frame.symbols.declare_parameter("caller"))
macro_ref.accesses_caller = True
if "kwargs" in undeclared and "kwargs" not in skip_special_params:
args.append(frame.symbols.declare_parameter("kwargs"))
macro_ref.accesses_kwargs = True
if "varargs" in undeclared and "varargs" not in skip_special_params:
args.append(frame.symbols.declare_parameter("varargs"))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
self.writeline(f"{self.func('macro')}({', '.join(args)}):", node)
self.indent()
self.buffer(frame)
self.enter_frame(frame)
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
self.writeline(f"if {ref} is missing:")
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
self.writeline(
f'{ref} = undefined("parameter {arg.name!r} was not provided",'
f" name={arg.name!r})"
)
else:
self.writeline(f"{ref} = ")
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
self.pop_parameter_definitions()
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame, force_unescaped=True)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
return frame, macro_ref
def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None:
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, "name", None)
if len(macro_ref.node.args) == 1:
arg_tuple += ","
self.write(
f"Macro(environment, macro, {name!r}, ({arg_tuple}),"
f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r},"
f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)"
)
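    # Hedged illustration: for ``{% macro input(name, type) %}`` this writes
    # roughly ``Macro(environment, macro, 'input', ('name', 'type'), False,
    # False, False, context.eval_ctx.autoescape)``; the three booleans flip
    # to True when the body uses kwargs, varargs, or caller respectively.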
def position(self, node: nodes.Node) -> str:
"""Return a human readable position for the node."""
rv = f"line {node.lineno}"
if self.name is not None:
rv = f"{rv} in {self.name!r}"
return rv
def dump_local_context(self, frame: Frame) -> str:
items_kv = ", ".join(
f"{name!r}: {target}"
for name, target in frame.symbols.dump_stores().items()
)
return f"{{{items_kv}}}"
def write_commons(self) -> None:
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline("resolve = context.resolve_or_missing")
self.writeline("undefined = environment.undefined")
self.writeline("concat = environment.concat")
# always use the standard Undefined class for the implicit else of
# conditional expressions
self.writeline("cond_expr_undefined = Undefined")
self.writeline("if 0: yield None")
def push_parameter_definitions(self, frame: Frame) -> None:
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
undefined expressions for parameters in macros as macros can reference
otherwise unbound parameters.
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
def pop_parameter_definitions(self) -> None:
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
def mark_parameter_stored(self, target: str) -> None:
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
def push_context_reference(self, target: str) -> None:
self._context_reference_stack.append(target)
def pop_context_reference(self) -> None:
self._context_reference_stack.pop()
def get_context_ref(self) -> str:
return self._context_reference_stack[-1]
def get_resolve_func(self) -> str:
target = self._context_reference_stack[-1]
if target == "context":
return "resolve"
return f"{target}.resolve"
def derive_context(self, frame: Frame) -> str:
return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})"
def parameter_is_undeclared(self, target: str) -> bool:
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
def push_assign_tracking(self) -> None:
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
def pop_assign_tracking(self, frame: Frame) -> None:
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
"""
vars = self._assign_stack.pop()
if (
not frame.block_frame
and not frame.loop_frame
and not frame.toplevel
or not vars
):
return
public_names = [x for x in vars if x[:1] != "_"]
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
if frame.loop_frame:
self.writeline(f"_loop_vars[{name!r}] = {ref}")
return
if frame.block_frame:
self.writeline(f"_block_vars[{name!r}] = {ref}")
return
self.writeline(f"context.vars[{name!r}] = {ref}")
else:
if frame.loop_frame:
self.writeline("_loop_vars.update({")
elif frame.block_frame:
self.writeline("_block_vars.update({")
else:
self.writeline("context.vars.update({")
for idx, name in enumerate(vars):
if idx:
self.write(", ")
ref = frame.symbols.ref(name)
self.write(f"{name!r}: {ref}")
self.write("})")
if not frame.block_frame and not frame.loop_frame and public_names:
if len(public_names) == 1:
self.writeline(f"context.exported_vars.add({public_names[0]!r})")
else:
names_str = ", ".join(map(repr, public_names))
self.writeline(f"context.exported_vars.update(({names_str}))")
# -- Statement Visitors
def visit_Template(
self, node: nodes.Template, frame: t.Optional[Frame] = None
) -> None:
assert frame is None, "no root frame allowed"
eval_ctx = EvalContext(self.environment, self.name)
from .runtime import exported, async_exported
if self.environment.is_async:
exported_names = sorted(exported + async_exported)
else:
exported_names = sorted(exported)
self.writeline("from jinja2.runtime import " + ", ".join(exported_names))
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = "" if self.defer_init else ", environment=environment"
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail(f"block {block.name!r} defined twice", block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if "." in imp:
module, obj = imp.rsplit(".", 1)
self.writeline(f"from {module} import {obj} as {alias}")
else:
self.writeline(f"import {imp} as {alias}")
# add the load name
self.writeline(f"name = {self.name!r}")
# generate the root render function.
self.writeline(
f"{self.func('root')}(context, missing=missing{envenv}):", extra=1
)
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
if "self" in find_undeclared(node.body, ("self",)):
ref = frame.symbols.declare_parameter("self")
self.writeline(f"{ref} = TemplateReference(context)")
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
self.writeline("parent_template = None")
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline("if parent_template is not None:")
self.indent()
if not self.environment.is_async:
self.writeline("yield from parent_template.root_render_func(context)")
else:
self.writeline(
"async for event in parent_template.root_render_func(context):"
)
self.indent()
self.writeline("yield event")
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in self.blocks.items():
self.writeline(
f"{self.func('block_' + name)}(context, missing=missing{envenv}):",
block,
1,
)
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
block_frame.block_frame = True
undeclared = find_undeclared(block.body, ("self", "super"))
if "self" in undeclared:
ref = block_frame.symbols.declare_parameter("self")
self.writeline(f"{ref} = TemplateReference(context)")
if "super" in undeclared:
ref = block_frame.symbols.declare_parameter("super")
self.writeline(f"{ref} = context.super({name!r}, block_{name})")
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.writeline("_block_vars = {}")
self.enter_frame(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks)
self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1)
debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info)
self.writeline(f"debug_info = {debug_kv_str!r}")
def visit_Block(self, node: nodes.Block, frame: Frame) -> None:
"""Call a block and register it for the template."""
level = 0
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline("if parent_template is None:")
self.indent()
level += 1
if node.scoped:
context = self.derive_context(frame)
else:
context = self.get_context_ref()
if node.required:
self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node)
self.indent()
self.writeline(
f'raise TemplateRuntimeError("Required block {node.name!r} not found")',
node,
)
self.outdent()
if not self.environment.is_async and frame.buffer is None:
self.writeline(
f"yield from context.blocks[{node.name!r}][0]({context})", node
)
else:
self.writeline(
f"{self.choose_async()}for event in"
f" context.blocks[{node.name!r}][0]({context}):",
node,
)
self.indent()
self.simple_write("event", frame)
self.outdent()
self.outdent(level)
def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None:
"""Calls the extender."""
if not frame.toplevel:
self.fail("cannot use extend from a non top-level scope", node.lineno)
        # If no extends statement has been seen so far, we don't have to
        # add a check for whether something extended the template before
        # this one.
if self.extends_so_far > 0:
            # if we have a known extends we just add a template runtime
            # error into the generated code. We could catch that at compile
            # time too, but it's better not to confuse users by throwing the
            # same error at different times just "because we can".
if not self.has_known_extends:
self.writeline("if parent_template is not None:")
self.indent()
self.writeline('raise TemplateRuntimeError("extended multiple times")')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline("parent_template = environment.get_template(", node)
self.visit(node.template, frame)
self.write(f", {self.name!r})")
self.writeline("for name, parent_block in parent_template.blocks.items():")
self.indent()
self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node: nodes.Include, frame: Frame) -> None:
"""Handles includes."""
if node.ignore_missing:
self.writeline("try:")
self.indent()
func_name = "get_or_select_template"
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, str):
func_name = "get_template"
elif isinstance(node.template.value, (tuple, list)):
func_name = "select_template"
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = "select_template"
self.writeline(f"template = environment.{func_name}(", node)
self.visit(node.template, frame)
self.write(f", {self.name!r})")
if node.ignore_missing:
self.outdent()
self.writeline("except TemplateNotFound:")
self.indent()
self.writeline("pass")
self.outdent()
self.writeline("else:")
self.indent()
skip_event_yield = False
if node.with_context:
self.writeline(
f"{self.choose_async()}for event in template.root_render_func("
"template.new_context(context.get_all(), True,"
f" {self.dump_local_context(frame)})):"
)
elif self.environment.is_async:
self.writeline(
"for event in (await template._get_default_module_async())"
"._body_stream:"
)
else:
self.writeline("yield from template._get_default_module()._body_stream")
skip_event_yield = True
if not skip_event_yield:
self.indent()
self.simple_write("event", frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def _import_common(
self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame
) -> None:
self.write(f"{self.choose_async('await ')}environment.get_template(")
self.visit(node.template, frame)
self.write(f", {self.name!r}).")
if node.with_context:
f_name = f"make_module{self.choose_async('_async')}"
self.write(
f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})"
)
else:
self.write(f"_get_default_module{self.choose_async('_async')}(context)")
def visit_Import(self, node: nodes.Import, frame: Frame) -> None:
"""Visit regular imports."""
self.writeline(f"{frame.symbols.ref(node.target)} = ", node)
if frame.toplevel:
self.write(f"context.vars[{node.target!r}] = ")
self._import_common(node, frame)
if frame.toplevel and not node.target.startswith("_"):
self.writeline(f"context.exported_vars.discard({node.target!r})")
def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None:
"""Visit named imports."""
self.newline(node)
self.write("included_template = ")
self._import_common(node, frame)
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline(
f"{frame.symbols.ref(alias)} ="
f" getattr(included_template, {name!r}, missing)"
)
self.writeline(f"if {frame.symbols.ref(alias)} is missing:")
self.indent()
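            # Note: the first segment below is deliberately *not* an f-string;
            # it is embedded into the generated module as part of an f-string
            # literal (see the ``f{message!r}`` interpolation that follows).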
message = (
"the template {included_template.__name__!r}"
f" (imported on {self.position(node)})"
f" does not export the requested name {name!r}"
)
self.writeline(
f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})"
)
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith("_"):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}")
else:
names_kv = ", ".join(
f"{name!r}: {frame.symbols.ref(name)}" for name in var_names
)
self.writeline(f"context.vars.update({{{names_kv}}})")
if discarded_names:
if len(discarded_names) == 1:
self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})")
else:
names_str = ", ".join(map(repr, discarded_names))
self.writeline(
f"context.exported_vars.difference_update(({names_str}))"
)
def visit_For(self, node: nodes.For, frame: Frame) -> None:
loop_frame = frame.inner()
loop_frame.loop_frame = True
test_frame = frame.inner()
else_frame = frame.inner()
        # try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode, if the special loop
        # variable is accessed in the body, or if the body is a scoped block.
extended_loop = (
node.recursive
or "loop"
in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",))
or any(block.scoped for block in node.find_all(nodes.Block))
)
loop_ref = None
if extended_loop:
loop_ref = loop_frame.symbols.declare_parameter("loop")
loop_frame.symbols.analyze_node(node, for_branch="body")
if node.else_:
else_frame.symbols.analyze_node(node, for_branch="else")
if node.test:
loop_filter_func = self.temporary_identifier()
test_frame.symbols.analyze_node(node, for_branch="test")
self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test)
self.indent()
self.enter_frame(test_frame)
self.writeline(self.choose_async("async for ", "for "))
self.visit(node.target, loop_frame)
self.write(" in ")
self.write(self.choose_async("auto_aiter(fiter)", "fiter"))
self.write(":")
self.indent()
self.writeline("if ", node.test)
self.visit(node.test, test_frame)
self.write(":")
self.indent()
self.writeline("yield ")
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
self.writeline(
f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node
)
self.indent()
self.buffer(loop_frame)
# Use the same buffer for the else frame
else_frame.buffer = loop_frame.buffer
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline(f"{loop_ref} = missing")
for name in node.find_all(nodes.Name):
if name.ctx == "store" and name.name == "loop":
self.fail(
"Can't assign to special loop variable in for-loop target",
name.lineno,
)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline(f"{iteration_indicator} = 1")
self.writeline(self.choose_async("async for ", "for "), node)
self.visit(node.target, loop_frame)
if extended_loop:
self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(")
else:
self.write(" in ")
if node.test:
self.write(f"{loop_filter_func}(")
if node.recursive:
self.write("reciter")
else:
if self.environment.is_async and not extended_loop:
self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
self.write(")")
if node.test:
self.write(")")
if node.recursive:
self.write(", undefined, loop_render_func, depth):")
else:
self.write(", undefined):" if extended_loop else ":")
self.indent()
self.enter_frame(loop_frame)
self.writeline("_loop_vars = {}")
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline(f"{iteration_indicator} = 0")
self.outdent()
self.leave_frame(
loop_frame, with_python_scope=node.recursive and not node.else_
)
if node.else_:
self.writeline(f"if {iteration_indicator}:")
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
self.leave_frame(else_frame)
self.outdent()
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
self.write(f"{self.choose_async('await ')}loop(")
if self.environment.is_async:
self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async:
self.write(")")
self.write(", loop)")
self.end_write(frame)
# at the end of the iteration, clear any assignments made in the
# loop from the top level
if self._assign_stack:
self._assign_stack[-1].difference_update(loop_frame.symbols.stores)
def visit_If(self, node: nodes.If, frame: Frame) -> None:
if_frame = frame.soft()
self.writeline("if ", node)
self.visit(node.test, if_frame)
self.write(":")
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
for elif_ in node.elif_:
self.writeline("elif ", elif_)
self.visit(elif_.test, if_frame)
self.write(":")
self.indent()
self.blockvisit(elif_.body, if_frame)
self.outdent()
if node.else_:
self.writeline("else:")
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None:
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith("_"):
self.write(f"context.exported_vars.add({node.name!r})")
self.writeline(f"context.vars[{node.name!r}] = ")
self.write(f"{frame.symbols.ref(node.name)} = ")
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node: nodes.CallBlock, frame: Frame) -> None:
call_frame, macro_ref = self.macro_body(node, frame)
self.writeline("caller = ")
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None:
filter_frame = frame.inner()
filter_frame.symbols.analyze_node(node)
self.enter_frame(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.leave_frame(filter_frame)
def visit_With(self, node: nodes.With, frame: Frame) -> None:
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
for target, expr in zip(node.targets, node.values):
self.newline()
self.visit(target, with_frame)
self.write(" = ")
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None:
self.newline(node)
self.visit(node.node, frame)
class _FinalizeInfo(t.NamedTuple):
const: t.Optional[t.Callable[..., str]]
src: t.Optional[str]
@staticmethod
def _default_finalize(value: t.Any) -> t.Any:
"""The default finalize function if the environment isn't
configured with one. Or, if the environment has one, this is
called on that function's output for constants.
"""
return str(value)
_finalize: t.Optional[_FinalizeInfo] = None
def _make_finalize(self) -> _FinalizeInfo:
"""Build the finalize function to be used on constants and at
runtime. Cached so it's only created once for all output nodes.
Returns a ``namedtuple`` with the following attributes:
``const``
A function to finalize constant data at compile time.
``src``
Source code to output around nodes to be evaluated at
runtime.
"""
if self._finalize is not None:
return self._finalize
finalize: t.Optional[t.Callable[..., t.Any]]
finalize = default = self._default_finalize
src = None
if self.environment.finalize:
src = "environment.finalize("
env_finalize = self.environment.finalize
pass_arg = {
_PassArg.context: "context",
_PassArg.eval_context: "context.eval_ctx",
_PassArg.environment: "environment",
}.get(
_PassArg.from_obj(env_finalize) # type: ignore
)
finalize = None
if pass_arg is None:
def finalize(value: t.Any) -> t.Any:
return default(env_finalize(value))
else:
src = f"{src}{pass_arg}, "
if pass_arg == "environment":
def finalize(value: t.Any) -> t.Any:
return default(env_finalize(self.environment, value))
self._finalize = self._FinalizeInfo(finalize, src)
return self._finalize
def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
"""Given a group of constant values converted from ``Output``
child nodes, produce a string to write to the template module
source.
"""
return repr(concat(group))
def _output_child_to_const(
self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
) -> str:
"""Try to optimize a child of an ``Output`` node by trying to
convert it to constant, finalized data at compile time.
If :exc:`Impossible` is raised, the node is not constant and
will be evaluated at runtime. Any other exception will also be
evaluated at runtime for easier debugging.
"""
const = node.as_const(frame.eval_ctx)
if frame.eval_ctx.autoescape:
const = escape(const)
# Template data doesn't go through finalize.
if isinstance(node, nodes.TemplateData):
return str(const)
return finalize.const(const) # type: ignore
def _output_child_pre(
self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
) -> None:
"""Output extra source code before visiting a child of an
``Output`` node.
"""
if frame.eval_ctx.volatile:
self.write("(escape if context.eval_ctx.autoescape else str)(")
elif frame.eval_ctx.autoescape:
self.write("escape(")
else:
self.write("str(")
if finalize.src is not None:
self.write(finalize.src)
def _output_child_post(
self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
) -> None:
"""Output extra source code after visiting a child of an
``Output`` node.
"""
self.write(")")
if finalize.src is not None:
self.write(")")
def visit_Output(self, node: nodes.Output, frame: Frame) -> None:
# If an extends is active, don't render outside a block.
if frame.require_output_check:
# A top-level extends is known to exist at compile time.
if self.has_known_extends:
return
self.writeline("if parent_template is None:")
self.indent()
finalize = self._make_finalize()
body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = []
# Evaluate constants at compile time if possible. Each item in
# body will be either a list of static data or a node to be
# evaluated at runtime.
for child in node.nodes:
try:
if not (
# If the finalize function requires runtime context,
# constants can't be evaluated at compile time.
finalize.const
# Unless it's basic template data that won't be
# finalized anyway.
or isinstance(child, nodes.TemplateData)
):
raise nodes.Impossible()
const = self._output_child_to_const(child, frame, finalize)
except (nodes.Impossible, Exception):
# The node was not constant and needs to be evaluated at
# runtime. Or another error was raised, which is easier
# to debug at runtime.
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
if frame.buffer is not None:
if len(body) == 1:
self.writeline(f"{frame.buffer}.append(")
else:
self.writeline(f"{frame.buffer}.extend((")
self.indent()
for item in body:
if isinstance(item, list):
# A group of constant data to join and output.
val = self._output_const_repr(item)
if frame.buffer is None:
self.writeline("yield " + val)
else:
self.writeline(val + ",")
else:
if frame.buffer is None:
self.writeline("yield ", item)
else:
self.newline(item)
# A node to be evaluated at runtime.
self._output_child_pre(item, frame, finalize)
self.visit(item, frame)
self._output_child_post(item, frame, finalize)
if frame.buffer is not None:
self.write(",")
if frame.buffer is not None:
self.outdent()
self.writeline(")" if len(body) == 1 else "))")
if frame.require_output_check:
self.outdent()
def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None:
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
self.write(" = ")
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None:
self.push_assign_tracking()
block_frame = frame.inner()
# This is a special case. Since a set block always captures we
# will disable output checks. This way one can use set blocks
# toplevel even in extended templates.
block_frame.require_output_check = False
block_frame.symbols.analyze_node(node)
self.enter_frame(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
if node.filter is not None:
self.visit_Filter(node.filter, block_frame)
else:
self.write(f"concat({block_frame.buffer})")
self.write(")")
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node: nodes.Name, frame: Frame) -> None:
if node.ctx == "store" and (
frame.toplevel or frame.loop_frame or frame.block_frame
):
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
if node.ctx == "load":
load = frame.symbols.find_load(ref)
if not (
load is not None
and load[0] == VAR_LOAD_PARAMETER
and not self.parameter_is_undeclared(ref)
):
self.write(
f"(undefined(name={node.name!r}) if {ref} is missing else {ref})"
)
return
self.write(ref)
def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None:
# NSRefs can only be used to store values; since they use the normal
# `foo.bar` notation they will be parsed as a normal attribute access
# when used anywhere but in a `set` context
ref = frame.symbols.ref(node.name)
self.writeline(f"if not isinstance({ref}, Namespace):")
self.indent()
self.writeline(
"raise TemplateRuntimeError"
'("cannot assign attribute on non-namespace object")'
)
self.outdent()
self.writeline(f"{ref}[{node.attr!r}]")
def visit_Const(self, node: nodes.Const, frame: Frame) -> None:
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None:
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write(
f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})"
)
def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None:
self.write("(")
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
self.write(",)" if idx == 0 else ")")
def visit_List(self, node: nodes.List, frame: Frame) -> None:
self.write("[")
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
self.write("]")
def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None:
self.write("{")
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item.key, frame)
self.write(": ")
self.visit(item.value, frame)
self.write("}")
visit_Add = _make_binop("+")
visit_Sub = _make_binop("-")
visit_Mul = _make_binop("*")
visit_Div = _make_binop("/")
visit_FloorDiv = _make_binop("//")
visit_Pow = _make_binop("**")
visit_Mod = _make_binop("%")
visit_And = _make_binop("and")
visit_Or = _make_binop("or")
visit_Pos = _make_unop("+")
visit_Neg = _make_unop("-")
visit_Not = _make_unop("not ")
@optimizeconst
def visit_Concat(self, node: nodes.Concat, frame: Frame) -> None:
if frame.eval_ctx.volatile:
func_name = "(markup_join if context.eval_ctx.volatile else str_join)"
elif frame.eval_ctx.autoescape:
func_name = "markup_join"
else:
func_name = "str_join"
self.write(f"{func_name}((")
for arg in node.nodes:
self.visit(arg, frame)
self.write(", ")
self.write("))")
@optimizeconst
def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None:
self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
self.write(")")
def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None:
self.write(f" {operators[node.op]} ")
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None:
if self.environment.is_async:
self.write("(await auto_await(")
self.write("environment.getattr(")
self.visit(node.node, frame)
self.write(f", {node.attr!r})")
if self.environment.is_async:
self.write("))")
@optimizeconst
def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None:
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write("[")
self.visit(node.arg, frame)
self.write("]")
else:
if self.environment.is_async:
self.write("(await auto_await(")
self.write("environment.getitem(")
self.visit(node.node, frame)
self.write(", ")
self.visit(node.arg, frame)
self.write(")")
if self.environment.is_async:
self.write("))")
def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None:
if node.start is not None:
self.visit(node.start, frame)
self.write(":")
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(":")
self.visit(node.step, frame)
@contextmanager
def _filter_test_common(
self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool
) -> t.Iterator[None]:
if self.environment.is_async:
self.write("(await auto_await(")
if is_filter:
self.write(f"{self.filters[node.name]}(")
func = self.environment.filters.get(node.name)
else:
self.write(f"{self.tests[node.name]}(")
func = self.environment.tests.get(node.name)
# When inside an If or CondExpr frame, allow the filter to be
# undefined at compile time and only raise an error if it's
# actually called at runtime. See pull_dependencies.
if func is None and not frame.soft_frame:
type_name = "filter" if is_filter else "test"
self.fail(f"No {type_name} named {node.name!r}.", node.lineno)
pass_arg = {
_PassArg.context: "context",
_PassArg.eval_context: "context.eval_ctx",
_PassArg.environment: "environment",
}.get(
_PassArg.from_obj(func) # type: ignore
)
if pass_arg is not None:
self.write(f"{pass_arg}, ")
# Back to the visitor function to handle visiting the target of
# the filter or test.
yield
self.signature(node, frame)
self.write(")")
if self.environment.is_async:
self.write("))")
@optimizeconst
def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None:
with self._filter_test_common(node, frame, True):
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write(
f"(Markup(concat({frame.buffer}))"
f" if context.eval_ctx.autoescape else concat({frame.buffer}))"
)
elif frame.eval_ctx.autoescape:
self.write(f"Markup(concat({frame.buffer}))")
else:
self.write(f"concat({frame.buffer})")
@optimizeconst
def visit_Test(self, node: nodes.Test, frame: Frame) -> None:
with self._filter_test_common(node, frame, False):
self.visit(node.node, frame)
@optimizeconst
def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None:
frame = frame.soft()
def write_expr2() -> None:
if node.expr2 is not None:
self.visit(node.expr2, frame)
return
self.write(
f'cond_expr_undefined("the inline if-expression on'
f" {self.position(node)} evaluated to false and no else"
f' section was defined.")'
)
self.write("(")
self.visit(node.expr1, frame)
self.write(" if ")
self.visit(node.test, frame)
self.write(" else ")
write_expr2()
self.write(")")
@optimizeconst
def visit_Call(
self, node: nodes.Call, frame: Frame, forward_caller: bool = False
) -> None:
if self.environment.is_async:
self.write("(await auto_await(")
if self.environment.sandboxed:
self.write("environment.call(context, ")
else:
self.write("context.call(")
self.visit(node.node, frame)
extra_kwargs = {"caller": "caller"} if forward_caller else None
loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {}
block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {}
if extra_kwargs:
extra_kwargs.update(loop_kwargs, **block_kwargs)
elif loop_kwargs or block_kwargs:
extra_kwargs = dict(loop_kwargs, **block_kwargs)
self.signature(node, frame, extra_kwargs)
self.write(")")
if self.environment.is_async:
self.write("))")
def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None:
self.write(node.key + "=")
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None:
self.write("Markup(")
self.visit(node.expr, frame)
self.write(")")
def visit_MarkSafeIfAutoescape(
self, node: nodes.MarkSafeIfAutoescape, frame: Frame
) -> None:
self.write("(Markup if context.eval_ctx.autoescape else identity)(")
self.visit(node.expr, frame)
self.write(")")
def visit_EnvironmentAttribute(
self, node: nodes.EnvironmentAttribute, frame: Frame
) -> None:
self.write("environment." + node.name)
def visit_ExtensionAttribute(
self, node: nodes.ExtensionAttribute, frame: Frame
) -> None:
self.write(f"environment.extensions[{node.identifier!r}].{node.name}")
def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None:
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None:
self.write(node.name)
def visit_ContextReference(
self, node: nodes.ContextReference, frame: Frame
) -> None:
self.write("context")
def visit_DerivedContextReference(
self, node: nodes.DerivedContextReference, frame: Frame
) -> None:
self.write(self.derive_context(frame))
def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None:
self.writeline("continue", node)
def visit_Break(self, node: nodes.Break, frame: Frame) -> None:
self.writeline("break", node)
def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None:
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None:
ctx = self.temporary_identifier()
self.writeline(f"{ctx} = {self.derive_context(frame)}")
self.writeline(f"{ctx}.vars = ")
self.visit(node.context, frame)
self.push_context_reference(ctx)
scope_frame = frame.inner(isolated=True)
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
self.pop_context_reference()
def visit_EvalContextModifier(
self, node: nodes.EvalContextModifier, frame: Frame
) -> None:
for keyword in node.options:
self.writeline(f"context.eval_ctx.{keyword.key} = ")
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(
self, node: nodes.ScopedEvalContextModifier, frame: Frame
) -> None:
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
self.writeline(f"{old_ctx_name} = context.eval_ctx.save()")
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
self.writeline(f"context.eval_ctx.revert({old_ctx_name})")
| 72,171 | 35.878896 | 88 | py |
jinja | jinja-main/src/jinja2/defaults.py | import typing as t
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
from .utils import Cycler
from .utils import generate_lorem_ipsum
from .utils import Joiner
from .utils import Namespace
if t.TYPE_CHECKING:
import typing_extensions as te
# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
LINE_STATEMENT_PREFIX: t.Optional[str] = None
LINE_COMMENT_PREFIX: t.Optional[str] = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n"
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
DEFAULT_NAMESPACE = {
"range": range,
"dict": dict,
"lipsum": generate_lorem_ipsum,
"cycler": Cycler,
"joiner": Joiner,
"namespace": Namespace,
}
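# Hedged usage note: these names are available in every template without
# being passed in, e.g. ``{{ range(3) | list }}`` renders ``[0, 1, 2]`` and
# ``{% set ns = namespace(count=0) %}`` creates a writable namespace.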
# default policies
DEFAULT_POLICIES: t.Dict[str, t.Any] = {
"compiler.ascii_str": True,
"urlize.rel": "noopener",
"urlize.target": None,
"urlize.extra_schemes": None,
"truncate.leeway": 5,
"json.dumps_function": None,
"json.dumps_kwargs": {"sort_keys": True},
"ext.i18n.trimmed": False,
}
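# Hedged example of overriding a policy on an environment (the values used
# here are illustrative):
#
#     env = Environment()
#     env.policies["truncate.leeway"] = 0  # make |truncate cut exactly
#     env.policies["json.dumps_kwargs"] = {"sort_keys": True, "indent": 2}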
| 1,267 | 24.877551 | 61 | py |
jinja | jinja-main/src/jinja2/bccache.py | """The optional bytecode cache system. This is useful if you have very
complex template situations and the compilation of all those templates
slows down your application too much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
"""
import errno
import fnmatch
import marshal
import os
import pickle
import stat
import sys
import tempfile
import typing as t
from hashlib import sha1
from io import BytesIO
from types import CodeType
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
class _MemcachedClient(te.Protocol):
def get(self, key: str) -> bytes:
...
def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None:
...
bc_version = 5
# Magic bytes to identify Jinja bytecode cache files. Contains the
# Python major and minor version to avoid loading incompatible bytecode
# if a project upgrades its Python version.
bc_magic = (
b"j2"
+ pickle.dumps(bc_version, 2)
+ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
)
class Bucket:
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment: "Environment", key: str, checksum: str) -> None:
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self) -> None:
"""Resets the bucket (unloads the bytecode)."""
self.code: t.Optional[CodeType] = None
def load_bytecode(self, f: t.BinaryIO) -> None:
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
# if marshal_load fails then we need to reload
try:
self.code = marshal.load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
def write_bytecode(self, f: t.IO[bytes]) -> None:
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError("can't write empty bucket")
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal.dump(self.code, f)
def bytecode_from_string(self, string: bytes) -> None:
"""Load bytecode from bytes."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self) -> bytes:
"""Return the bytecode as bytes."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
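# A hedged round-trip sketch (``env``, ``key``, ``checksum`` and
# ``compiled_code`` are hypothetical):
#
#     bucket = Bucket(env, key, checksum)
#     bucket.code = compiled_code             # a CodeType object
#     blob = bucket.bytecode_to_string()      # bc_magic + checksum + marshal
#     clone = Bucket(env, key, checksum)
#     clone.bytecode_from_string(blob)
#     assert clone.code is not None           # reset instead on a mismatch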
class BytecodeCache:
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja.
"""
def load_bytecode(self, bucket: Bucket) -> None:
"""Subclasses have to override this method to load bytecode into a
        bucket. If it is not able to find code in the cache for the
        bucket, it must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket: Bucket) -> None:
"""Subclasses have to override this method to write the bytecode
        from a bucket back to the cache. If it is unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self) -> None:
"""Clears the cache. This method is not used by Jinja but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(
        self, name: str, filename: t.Optional[str] = None
) -> str:
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode("utf-8"))
if filename is not None:
hash.update(f"|{filename}".encode())
return hash.hexdigest()
def get_source_checksum(self, source: str) -> str:
"""Returns a checksum for the source."""
return sha1(source.encode("utf-8")).hexdigest()
def get_bucket(
self,
environment: "Environment",
name: str,
filename: t.Optional[str],
source: str,
) -> Bucket:
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket: Bucket) -> None:
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(
self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache"
) -> None:
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self) -> str:
def _unsafe_dir() -> "te.NoReturn":
raise RuntimeError(
"Cannot determine safe temp directory. You "
"need to explicitly provide one."
)
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is user-specific unless
        # explicitly forced otherwise. We can just use that.
if os.name == "nt":
return tmpdir
if not hasattr(os, "getuid"):
_unsafe_dir()
dirname = f"_jinja2-cache-{os.getuid()}"
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
if (
actual_dir_stat.st_uid != os.getuid()
or not stat.S_ISDIR(actual_dir_stat.st_mode)
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
):
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if (
actual_dir_stat.st_uid != os.getuid()
or not stat.S_ISDIR(actual_dir_stat.st_mode)
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
):
_unsafe_dir()
return actual_dir
def _get_cache_filename(self, bucket: Bucket) -> str:
return os.path.join(self.directory, self.pattern % (bucket.key,))
def load_bytecode(self, bucket: Bucket) -> None:
filename = self._get_cache_filename(bucket)
# Don't test for existence before opening the file, since the
# file could disappear after the test before the open.
try:
f = open(filename, "rb")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# PermissionError can occur on Windows when an operation is
# in progress, such as calling clear().
return
with f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket: Bucket) -> None:
# Write to a temporary file, then rename to the real name after
# writing. This avoids another process reading the file before
# it is fully written.
name = self._get_cache_filename(bucket)
f = tempfile.NamedTemporaryFile(
mode="wb",
dir=os.path.dirname(name),
prefix=os.path.basename(name),
suffix=".tmp",
delete=False,
)
def remove_silent() -> None:
try:
os.remove(f.name)
except OSError:
# Another process may have called clear(). On Windows,
# another program may be holding the file open.
pass
try:
with f:
bucket.write_bytecode(f)
except BaseException:
remove_silent()
raise
try:
os.replace(f.name, name)
except OSError:
# Another process may have called clear(). On Windows,
# another program may be holding the file open.
remove_silent()
except BaseException:
remove_silent()
raise
def clear(self) -> None:
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",))
for filename in files:
try:
remove(os.path.join(self.directory, filename))
except OSError:
pass
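# Hedged wiring example (the template directory and cache path are
# hypothetical):
#
#     from jinja2 import Environment, FileSystemLoader
#
#     env = Environment(
#         loader=FileSystemLoader("templates"),
#         bytecode_cache=FileSystemBytecodeCache("/tmp/jinja_cache"),
#     )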
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `cachelib <https://github.com/pallets/cachelib>`_
- `python-memcached <https://pypi.org/project/python-memcached/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only text. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
a default timeout or no timeout should be assumed, if it's
provided it's an integer with the number of seconds the cache
item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(
self,
client: "_MemcachedClient",
prefix: str = "jinja2/bytecode/",
timeout: t.Optional[int] = None,
ignore_memcache_errors: bool = True,
):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket: Bucket) -> None:
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
else:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket: Bucket) -> None:
key = self.prefix + bucket.key
value = bucket.bytecode_to_string()
try:
if self.timeout is not None:
self.client.set(key, value, self.timeout)
else:
self.client.set(key, value)
except Exception:
if not self.ignore_memcache_errors:
raise
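# Hedged wiring sketch; any client exposing ``get``/``set`` works, and the
# cachelib constructor shown is an assumption about that library:
#
#     from cachelib import MemcachedCache
#
#     client = MemcachedCache(["127.0.0.1:11211"])
#     env = Environment(bytecode_cache=MemcachedBytecodeCache(client))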
| 14,061 | 33.550369 | 87 | py |
jinja | jinja-main/src/jinja2/__init__.py | """Jinja is a template engine written in pure Python. It provides a
non-XML syntax that supports inline expressions and an optional
sandboxed environment.
"""
from .bccache import BytecodeCache as BytecodeCache
from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache
from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache
from .environment import Environment as Environment
from .environment import Template as Template
from .exceptions import TemplateAssertionError as TemplateAssertionError
from .exceptions import TemplateError as TemplateError
from .exceptions import TemplateNotFound as TemplateNotFound
from .exceptions import TemplateRuntimeError as TemplateRuntimeError
from .exceptions import TemplatesNotFound as TemplatesNotFound
from .exceptions import TemplateSyntaxError as TemplateSyntaxError
from .exceptions import UndefinedError as UndefinedError
from .loaders import BaseLoader as BaseLoader
from .loaders import ChoiceLoader as ChoiceLoader
from .loaders import DictLoader as DictLoader
from .loaders import FileSystemLoader as FileSystemLoader
from .loaders import FunctionLoader as FunctionLoader
from .loaders import ModuleLoader as ModuleLoader
from .loaders import PackageLoader as PackageLoader
from .loaders import PrefixLoader as PrefixLoader
from .runtime import ChainableUndefined as ChainableUndefined
from .runtime import DebugUndefined as DebugUndefined
from .runtime import make_logging_undefined as make_logging_undefined
from .runtime import StrictUndefined as StrictUndefined
from .runtime import Undefined as Undefined
from .utils import clear_caches as clear_caches
from .utils import is_undefined as is_undefined
from .utils import pass_context as pass_context
from .utils import pass_environment as pass_environment
from .utils import pass_eval_context as pass_eval_context
from .utils import select_autoescape as select_autoescape
__version__ = "3.2.0.dev0"
| 1,932 | 49.868421 | 72 | py |
jinja | jinja-main/src/jinja2/meta.py | """Functions that expose information about templates that might be
interesting for introspection.
"""
import typing as t
from . import nodes
from .compiler import CodeGenerator
from .compiler import Frame
if t.TYPE_CHECKING:
from .environment import Environment
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment: "Environment") -> None:
super().__init__(environment, "<introspection>", "<introspection>")
self.undeclared_identifiers: t.Set[str] = set()
def write(self, x: str) -> None:
"""Don't write."""
def enter_frame(self, frame: Frame) -> None:
"""Remember all undeclared identifiers."""
super().enter_frame(frame)
for _, (action, param) in frame.symbols.loads.items():
if action == "resolve" and param not in self.environment.globals:
self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast: nodes.Template) -> t.Set[str]:
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == {'bar'}
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment) # type: ignore
codegen.visit(ast)
return codegen.undeclared_identifiers
_ref_types = (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
_RefType = t.Union[nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include]
def find_referenced_templates(ast: nodes.Template) -> t.Iterator[t.Optional[str]]:
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
template_name: t.Any
for node in ast.find_all(_ref_types):
template: nodes.Expr = node.template # type: ignore
if not isinstance(template, nodes.Const):
# a tuple with some non consts in there
if isinstance(template, (nodes.Tuple, nodes.List)):
for template_name in template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, str):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(template.value, str):
yield template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and isinstance(
template.value, (tuple, list)
):
for template_name in template.value:
if isinstance(template_name, str):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
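# Usage sketch (illustrative assumption, not part of the module): the two
# helpers above are typically combined for dependency tracking.
if __name__ == "__main__":
    from jinja2 import Environment
    env = Environment()
    ast = env.parse(
        '{% extends "layout.html" %}{% block body %}{{ greeting }}{% endblock %}'
    )
    print(sorted(find_undeclared_variables(ast)))  # ['greeting']
    print(list(find_referenced_templates(ast)))  # ['layout.html']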
| 4,396 | 38.258929 | 82 | py |
jinja | jinja-main/src/jinja2/filters.py | """Built-in template filters used with the ``|`` operator."""
import math
import random
import re
import typing
import typing as t
from collections import abc
from itertools import chain
from itertools import groupby
from markupsafe import escape
from markupsafe import Markup
from markupsafe import soft_str
from .async_utils import async_variant
from .async_utils import auto_aiter
from .async_utils import auto_await
from .async_utils import auto_to_list
from .exceptions import FilterArgumentError
from .runtime import Undefined
from .utils import htmlsafe_json_dumps
from .utils import pass_context
from .utils import pass_environment
from .utils import pass_eval_context
from .utils import pformat
from .utils import url_quote
from .utils import urlize
if t.TYPE_CHECKING:
import typing_extensions as te
from .environment import Environment
from .nodes import EvalContext
from .runtime import Context
from .sandbox import SandboxedEnvironment # noqa: F401
class HasHTML(te.Protocol):
def __html__(self) -> str:
pass
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
K = t.TypeVar("K")
V = t.TypeVar("V")
def ignore_case(value: V) -> V:
"""For use as a postprocessor for :func:`make_attrgetter`. Converts strings
to lowercase and returns other types as-is."""
if isinstance(value, str):
return t.cast(V, value.lower())
return value
def make_attrgetter(
environment: "Environment",
attribute: t.Optional[t.Union[str, int]],
postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
default: t.Optional[t.Any] = None,
) -> t.Callable[[t.Any], t.Any]:
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
parts = _prepare_attribute_parts(attribute)
def attrgetter(item: t.Any) -> t.Any:
for part in parts:
item = environment.getitem(item, part)
if default is not None and isinstance(item, Undefined):
item = default
if postprocess is not None:
item = postprocess(item)
return item
return attrgetter
def make_multi_attrgetter(
environment: "Environment",
attribute: t.Optional[t.Union[str, int]],
postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
) -> t.Callable[[t.Any], t.List[t.Any]]:
"""Returns a callable that looks up the given comma separated
attributes from a passed object with the rules of the environment.
Dots are allowed to access attributes of each attribute. Integer
parts in paths are looked up as integers.
The value returned by the returned callable is a list of extracted
attribute values.
Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
"""
if isinstance(attribute, str):
split: t.Sequence[t.Union[str, int, None]] = attribute.split(",")
else:
split = [attribute]
parts = [_prepare_attribute_parts(item) for item in split]
def attrgetter(item: t.Any) -> t.List[t.Any]:
items = [None] * len(parts)
for i, attribute_part in enumerate(parts):
item_i = item
for part in attribute_part:
item_i = environment.getitem(item_i, part)
if postprocess is not None:
item_i = postprocess(item_i)
items[i] = item_i
return items
return attrgetter
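# Example (illustrative, hypothetical ``env`` and ``user``): with users having
# ``name`` and ``age`` attributes,
#
#     make_multi_attrgetter(env, "name,age")(user) -> [user.name, user.age]
#
# where each dotted part is resolved with the environment's ``getitem`` rules.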
def _prepare_attribute_parts(
attr: t.Optional[t.Union[str, int]]
) -> t.List[t.Union[str, int]]:
if attr is None:
return []
if isinstance(attr, str):
return [int(x) if x.isdigit() else x for x in attr.split(".")]
return [attr]
def do_forceescape(value: "t.Union[str, HasHTML]") -> Markup:
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, "__html__"):
value = t.cast("HasHTML", value).__html__()
return escape(str(value))
def do_urlencode(
value: t.Union[str, t.Mapping[str, t.Any], t.Iterable[t.Tuple[str, t.Any]]]
) -> str:
"""Quote data for use in a URL path or query using UTF-8.
Basic wrapper around :func:`urllib.parse.quote` when given a
string, or :func:`urllib.parse.urlencode` for a dict or iterable.
:param value: Data to quote. A string will be quoted directly. A
dict or iterable of ``(key, value)`` pairs will be joined as a
query string.
When given a string, "/" is not quoted. HTTP servers treat "/" and
"%2F" equivalently in paths. If you need quoted slashes, use the
``|replace("/", "%2F")`` filter.
.. versionadded:: 2.7
"""
if isinstance(value, str) or not isinstance(value, abc.Iterable):
return url_quote(value)
if isinstance(value, dict):
items: t.Iterable[t.Tuple[str, t.Any]] = value.items()
else:
items = value # type: ignore
return "&".join(
f"{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}" for k, v in items
)
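# Examples (illustrative):
#
#     do_urlencode("/path to/file") -> "/path%20to/file"
#     do_urlencode({"a": "b c"})    -> "a=b%20c"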
@pass_eval_context
def do_replace(
eval_ctx: "EvalContext", s: str, old: str, new: str, count: t.Optional[int] = None
) -> str:
"""Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced:
.. sourcecode:: jinja
{{ "Hello World"|replace("Hello", "Goodbye") }}
-> Goodbye World
{{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-> d'oh, d'oh, aaargh
"""
if count is None:
count = -1
if not eval_ctx.autoescape:
return str(s).replace(str(old), str(new), count)
if (
hasattr(old, "__html__")
or hasattr(new, "__html__")
and not hasattr(s, "__html__")
):
s = escape(s)
else:
s = soft_str(s)
return s.replace(soft_str(old), soft_str(new), count)
def do_upper(s: str) -> str:
"""Convert a value to uppercase."""
return soft_str(s).upper()
def do_lower(s: str) -> str:
"""Convert a value to lowercase."""
return soft_str(s).lower()
def do_items(value: t.Union[t.Mapping[K, V], Undefined]) -> t.Iterator[t.Tuple[K, V]]:
"""Return an iterator over the ``(key, value)`` items of a mapping.
``x|items`` is the same as ``x.items()``, except if ``x`` is
undefined an empty iterator is returned.
This filter is useful if you expect the template to be rendered with
an implementation of Jinja in another programming language that does
not have a ``.items()`` method on its mapping type.
.. code-block:: html+jinja
<dl>
{% for key, value in my_dict|items %}
<dt>{{ key }}
<dd>{{ value }}
{% endfor %}
</dl>
.. versionadded:: 3.1
"""
if isinstance(value, Undefined):
return
if not isinstance(value, abc.Mapping):
raise TypeError("Can only get item pairs from a mapping.")
yield from value.items()
@pass_eval_context
def do_xmlattr(
eval_ctx: "EvalContext", d: t.Mapping[str, t.Any], autospace: bool = True
) -> str:
"""Create an SGML/XML attribute string based on the items in a dict.
All values that are neither `none` nor `undefined` are automatically
escaped:
.. sourcecode:: html+jinja
<ul{{ {'class': 'my_list', 'missing': none,
'id': 'list-%d'|format(variable)}|xmlattr }}>
...
</ul>
Results in something like this:
.. sourcecode:: html
<ul class="my_list" id="list-42">
...
</ul>
    As you can see, a space is automatically prepended to the result if the
    filter returned something, unless the second parameter is false.
"""
rv = " ".join(
f'{escape(key)}="{escape(value)}"'
for key, value in d.items()
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
rv = " " + rv
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_capitalize(s: str) -> str:
"""Capitalize a value. The first character will be uppercase, all others
lowercase.
"""
return soft_str(s).capitalize()
_word_beginning_split_re = re.compile(r"([-\s({\[<]+)")
def do_title(s: str) -> str:
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
return "".join(
[
item[0].upper() + item[1:].lower()
for item in _word_beginning_split_re.split(soft_str(s))
if item
]
)
def do_dictsort(
value: t.Mapping[K, V],
case_sensitive: bool = False,
by: 'te.Literal["key", "value"]' = "key",
reverse: bool = False,
) -> t.List[t.Tuple[K, V]]:
"""Sort a dict and yield (key, value) pairs. Python dicts may not
be in the order you want to display them in, so sort them first.
.. sourcecode:: jinja
{% for key, value in mydict|dictsort %}
sort the dict by key, case insensitive
{% for key, value in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
{% for key, value in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for key, value in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
if by == "key":
pos = 0
elif by == "value":
pos = 1
else:
raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item: t.Tuple[t.Any, t.Any]) -> t.Any:
value = item[pos]
if not case_sensitive:
value = ignore_case(value)
return value
return sorted(value.items(), key=sort_func, reverse=reverse)
@pass_environment
def do_sort(
environment: "Environment",
value: "t.Iterable[V]",
reverse: bool = False,
case_sensitive: bool = False,
attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.List[V]":
"""Sort an iterable using Python's :func:`sorted`.
.. sourcecode:: jinja
{% for city in cities|sort %}
...
{% endfor %}
:param reverse: Sort descending instead of ascending.
:param case_sensitive: When sorting strings, sort upper and lower
case separately.
:param attribute: When sorting objects or dicts, an attribute or
key to sort by. Can use dot notation like ``"address.city"``.
Can be a list of attributes like ``"age,name"``.
The sort is stable, it does not change the relative order of
elements that compare equal. This makes it is possible to chain
sorts on different attributes and ordering.
.. sourcecode:: jinja
{% for user in users|sort(attribute="name")
|sort(reverse=true, attribute="age") %}
...
{% endfor %}
As a shortcut to chaining when the direction is the same for all
attributes, pass a comma separate list of attributes.
.. sourcecode:: jinja
{% for user in users|sort(attribute="age,name") %}
...
{% endfor %}
.. versionchanged:: 2.11.0
The ``attribute`` parameter can be a comma separated list of
attributes, e.g. ``"age,name"``.
.. versionchanged:: 2.6
The ``attribute`` parameter was added.
"""
key_func = make_multi_attrgetter(
environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return sorted(value, key=key_func, reverse=reverse)
@pass_environment
def do_unique(
environment: "Environment",
value: "t.Iterable[V]",
case_sensitive: bool = False,
attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.Iterator[V]":
"""Returns a list of unique items from the given iterable.
.. sourcecode:: jinja
{{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
the iterable passed to the filter.
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
seen = set()
for item in value:
key = getter(item)
if key not in seen:
seen.add(key)
yield item
def _min_or_max(
environment: "Environment",
value: "t.Iterable[V]",
func: "t.Callable[..., V]",
case_sensitive: bool,
attribute: t.Optional[t.Union[str, int]],
) -> "t.Union[V, Undefined]":
it = iter(value)
try:
first = next(it)
except StopIteration:
return environment.undefined("No aggregated item, sequence was empty.")
key_func = make_attrgetter(
environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return func(chain([first], it), key=key_func)
@pass_environment
def do_min(
environment: "Environment",
value: "t.Iterable[V]",
case_sensitive: bool = False,
attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.Union[V, Undefined]":
"""Return the smallest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|min }}
-> 1
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the min value of this attribute.
"""
return _min_or_max(environment, value, min, case_sensitive, attribute)
@pass_environment
def do_max(
environment: "Environment",
value: "t.Iterable[V]",
case_sensitive: bool = False,
attribute: t.Optional[t.Union[str, int]] = None,
) -> "t.Union[V, Undefined]":
"""Return the largest item from the sequence.
.. sourcecode:: jinja
{{ [1, 2, 3]|max }}
-> 3
:param case_sensitive: Treat upper and lower case strings as distinct.
:param attribute: Get the object with the max value of this attribute.
"""
return _min_or_max(environment, value, max, case_sensitive, attribute)
def do_default(
value: V,
default_value: V = "", # type: ignore
boolean: bool = False,
) -> V:
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
.. versionchanged:: 2.11
It's now possible to configure the :class:`~jinja2.Environment` with
:class:`~jinja2.ChainableUndefined` to make the `default` filter work
on nested elements and attributes that may contain undefined values
in the chain without getting an :exc:`~jinja2.UndefinedError`.
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
return value
@pass_eval_context
def sync_do_join(
eval_ctx: "EvalContext",
value: t.Iterable[t.Any],
d: str = "",
attribute: t.Optional[t.Union[str, int]] = None,
) -> str:
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string per
default, you can define it with the optional parameter:
.. sourcecode:: jinja
{{ [1, 2, 3]|join('|') }}
-> 1|2|3
{{ [1, 2, 3]|join }}
-> 123
It is also possible to join certain attributes of an object:
.. sourcecode:: jinja
{{ users|join(', ', attribute='username') }}
.. versionadded:: 2.6
The `attribute` parameter was added.
"""
if attribute is not None:
value = map(make_attrgetter(eval_ctx.environment, attribute), value)
# no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return str(d).join(map(str, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
if not hasattr(d, "__html__"):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
if hasattr(item, "__html__"):
do_escape = True
else:
value[idx] = str(item)
if do_escape:
d = escape(d)
else:
d = str(d)
return d.join(value)
    # no html involved, do normal joining
return soft_str(d).join(map(soft_str, value))
@async_variant(sync_do_join) # type: ignore
async def do_join(
eval_ctx: "EvalContext",
value: t.Union[t.AsyncIterable[t.Any], t.Iterable[t.Any]],
d: str = "",
attribute: t.Optional[t.Union[str, int]] = None,
) -> str:
return sync_do_join(eval_ctx, await auto_to_list(value), d, attribute)
def do_center(value: str, width: int = 80) -> str:
"""Centers the value in a field of a given width."""
return soft_str(value).center(width)
@pass_environment
def sync_do_first(
environment: "Environment", seq: "t.Iterable[V]"
) -> "t.Union[V, Undefined]":
"""Return the first item of a sequence."""
try:
return next(iter(seq))
except StopIteration:
return environment.undefined("No first item, sequence was empty.")
@async_variant(sync_do_first) # type: ignore
async def do_first(
environment: "Environment", seq: "t.Union[t.AsyncIterable[V], t.Iterable[V]]"
) -> "t.Union[V, Undefined]":
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
return environment.undefined("No first item, sequence was empty.")
@pass_environment
def do_last(
environment: "Environment", seq: "t.Reversible[V]"
) -> "t.Union[V, Undefined]":
"""Return the last item of a sequence.
Note: Does not work with generators. You may want to explicitly
convert it to a list:
.. sourcecode:: jinja
{{ data | selectattr('name', '==', 'Jinja') | list | last }}
"""
try:
return next(iter(reversed(seq)))
except StopIteration:
return environment.undefined("No last item, sequence was empty.")
# No async do_last, it may not be safe in async mode.
@pass_context
def do_random(context: "Context", seq: "t.Sequence[V]") -> "t.Union[V, Undefined]":
"""Return a random item from the sequence."""
try:
return random.choice(seq)
except IndexError:
return context.environment.undefined("No random item, sequence was empty.")
def do_filesizeformat(value: t.Union[str, float, int], binary: bool = False) -> str:
"""Format the value like a 'human-readable' file size (i.e. 13 kB,
4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
Giga, etc.), if the second parameter is set to `True` the binary
prefixes are used (Mebi, Gibi).
"""
bytes = float(value)
base = 1024 if binary else 1000
prefixes = [
("KiB" if binary else "kB"),
("MiB" if binary else "MB"),
("GiB" if binary else "GB"),
("TiB" if binary else "TB"),
("PiB" if binary else "PB"),
("EiB" if binary else "EB"),
("ZiB" if binary else "ZB"),
("YiB" if binary else "YB"),
]
if bytes == 1:
return "1 Byte"
elif bytes < base:
return f"{int(bytes)} Bytes"
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if bytes < unit:
return f"{base * bytes / unit:.1f} {prefix}"
return f"{base * bytes / unit:.1f} {prefix}"
def do_pprint(value: t.Any) -> str:
"""Pretty print a variable. Useful for debugging."""
return pformat(value)
_uri_scheme_re = re.compile(r"^([\w.+-]{2,}:(/){0,2})$")
@pass_eval_context
def do_urlize(
eval_ctx: "EvalContext",
value: str,
trim_url_limit: t.Optional[int] = None,
nofollow: bool = False,
target: t.Optional[str] = None,
rel: t.Optional[str] = None,
extra_schemes: t.Optional[t.Iterable[str]] = None,
) -> str:
"""Convert URLs in text into clickable links.
This may not recognize links in some situations. Usually, a more
comprehensive formatter, such as a Markdown library, is a better
choice.
Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
addresses. Links with trailing punctuation (periods, commas, closing
parentheses) and leading punctuation (opening parentheses) are
recognized excluding the punctuation. Email addresses that include
header fields are not recognized (for example,
``mailto:address@example.com?cc=copy@example.com``).
:param value: Original text containing URLs to link.
:param trim_url_limit: Shorten displayed URL values to this length.
:param nofollow: Add the ``rel=nofollow`` attribute to links.
:param target: Add the ``target`` attribute to links.
:param rel: Add the ``rel`` attribute to links.
:param extra_schemes: Recognize URLs that start with these schemes
in addition to the default behavior. Defaults to
``env.policies["urlize.extra_schemes"]``, which defaults to no
extra schemes.
.. versionchanged:: 3.0
The ``extra_schemes`` parameter was added.
.. versionchanged:: 3.0
Generate ``https://`` links for URLs without a scheme.
.. versionchanged:: 3.0
The parsing rules were updated. Recognize email addresses with
or without the ``mailto:`` scheme. Validate IP addresses. Ignore
parentheses and brackets in more cases.
.. versionchanged:: 2.8
The ``target`` parameter was added.
"""
policies = eval_ctx.environment.policies
rel_parts = set((rel or "").split())
if nofollow:
rel_parts.add("nofollow")
rel_parts.update((policies["urlize.rel"] or "").split())
rel = " ".join(sorted(rel_parts)) or None
if target is None:
target = policies["urlize.target"]
if extra_schemes is None:
extra_schemes = policies["urlize.extra_schemes"] or ()
for scheme in extra_schemes:
if _uri_scheme_re.fullmatch(scheme) is None:
raise FilterArgumentError(f"{scheme!r} is not a valid URI scheme prefix.")
rv = urlize(
value,
trim_url_limit=trim_url_limit,
rel=rel,
target=target,
extra_schemes=extra_schemes,
)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_indent(
s: str, width: t.Union[int, str] = 4, first: bool = False, blank: bool = False
) -> str:
"""Return a copy of the string with each line indented by 4 spaces. The
first line and blank lines are not indented by default.
:param width: Number of spaces, or a string, to indent by.
:param first: Don't skip indenting the first line.
:param blank: Don't skip indenting empty lines.
.. versionchanged:: 3.0
``width`` can be a string.
.. versionchanged:: 2.10
Blank lines are not indented by default.
Rename the ``indentfirst`` argument to ``first``.
"""
if isinstance(width, str):
indention = width
else:
indention = " " * width
newline = "\n"
if isinstance(s, Markup):
indention = Markup(indention)
newline = Markup(newline)
s += newline # this quirk is necessary for splitlines method
if blank:
rv = (newline + indention).join(s.splitlines())
else:
lines = s.splitlines()
rv = lines.pop(0)
if lines:
rv += newline + newline.join(
indention + line if line else line for line in lines
)
if first:
rv = indention + rv
return rv
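# Example (illustrative): with the defaults, the first line and blank lines
# are left alone:
#
#     do_indent("a\nb\n\nc", 2) -> "a\n  b\n\n  c"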
@pass_environment
def do_truncate(
env: "Environment",
s: str,
length: int = 255,
killwords: bool = False,
end: str = "...",
leeway: t.Optional[int] = None,
) -> str:
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
it will discard the last word. If the text was in fact
truncated it will append an ellipsis sign (``"..."``). If you want a
different ellipsis sign than ``"..."`` you can specify it using the
third parameter. Strings that only exceed the length by the tolerance
margin given in the fourth parameter will not be truncated.
.. sourcecode:: jinja
{{ "foo bar baz qux"|truncate(9) }}
-> "foo..."
{{ "foo bar baz qux"|truncate(9, True) }}
-> "foo ba..."
{{ "foo bar baz qux"|truncate(11) }}
-> "foo bar baz qux"
{{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
-> "foo bar..."
The default leeway on newer Jinja versions is 5 and was 0 before but
can be reconfigured globally.
"""
if leeway is None:
leeway = env.policies["truncate.leeway"]
assert length >= len(end), f"expected length >= {len(end)}, got {length}"
assert leeway >= 0, f"expected leeway >= 0, got {leeway}"
if len(s) <= length + leeway:
return s
if killwords:
return s[: length - len(end)] + end
result = s[: length - len(end)].rsplit(" ", 1)[0]
return result + end
@pass_environment
def do_wordwrap(
environment: "Environment",
s: str,
width: int = 79,
break_long_words: bool = True,
wrapstring: t.Optional[str] = None,
break_on_hyphens: bool = True,
) -> str:
"""Wrap a string to the given width. Existing newlines are treated
as paragraphs to be wrapped separately.
:param s: Original text to wrap.
:param width: Maximum length of wrapped lines.
:param break_long_words: If a word is longer than ``width``, break
it across lines.
:param break_on_hyphens: If a word contains hyphens, it may be split
across lines.
:param wrapstring: String to join each wrapped line. Defaults to
:attr:`Environment.newline_sequence`.
.. versionchanged:: 2.11
Existing newlines are treated as paragraphs wrapped separately.
.. versionchanged:: 2.11
Added the ``break_on_hyphens`` parameter.
.. versionchanged:: 2.7
Added the ``wrapstring`` parameter.
"""
import textwrap
if wrapstring is None:
wrapstring = environment.newline_sequence
# textwrap.wrap doesn't consider existing newlines when wrapping.
# If the string has a newline before width, wrap will still insert
# a newline at width, resulting in a short line. Instead, split and
# wrap each paragraph individually.
return wrapstring.join(
[
wrapstring.join(
textwrap.wrap(
line,
width=width,
expand_tabs=False,
replace_whitespace=False,
break_long_words=break_long_words,
break_on_hyphens=break_on_hyphens,
)
)
for line in s.splitlines()
]
)
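# Example (illustrative): do_wordwrap(environment, "aaa bbb ccc", width=7)
# -> "aaa bbb\nccc", joined with the environment's newline sequence.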
_word_re = re.compile(r"\w+")
def do_wordcount(s: str) -> int:
"""Count the words in that string."""
return len(_word_re.findall(soft_str(s)))
def do_int(value: t.Any, default: int = 0, base: int = 10) -> int:
"""Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter. You
can also override the default base (10) in the second
parameter, which handles input with prefixes such as
0b, 0o and 0x for bases 2, 8 and 16 respectively.
The base is ignored for decimal numbers and non-string values.
"""
try:
if isinstance(value, str):
return int(value, base)
return int(value)
except (TypeError, ValueError):
# this quirk is necessary so that "42.23"|int gives 42.
try:
return int(float(value))
except (TypeError, ValueError):
return default
def do_float(value: t.Any, default: float = 0.0) -> float:
"""Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter.
"""
try:
return float(value)
except (TypeError, ValueError):
return default
def do_format(value: str, *args: t.Any, **kwargs: t.Any) -> str:
"""Apply the given values to a `printf-style`_ format string, like
``string % values``.
.. sourcecode:: jinja
{{ "%s, %s!"|format(greeting, name) }}
Hello, World!
In most cases it should be more convenient and efficient to use the
``%`` operator or :meth:`str.format`.
.. code-block:: text
{{ "%s, %s!" % (greeting, name) }}
{{ "{}, {}!".format(greeting, name) }}
.. _printf-style: https://docs.python.org/library/stdtypes.html
#printf-style-string-formatting
"""
if args and kwargs:
raise FilterArgumentError(
"can't handle positional and keyword arguments at the same time"
)
return soft_str(value) % (kwargs or args)
def do_trim(value: str, chars: t.Optional[str] = None) -> str:
"""Strip leading and trailing characters, by default whitespace."""
return soft_str(value).strip(chars)
def do_striptags(value: "t.Union[str, HasHTML]") -> str:
"""Strip SGML/XML tags and replace adjacent whitespace by one space."""
if hasattr(value, "__html__"):
value = t.cast("HasHTML", value).__html__()
return Markup(str(value)).striptags()
def sync_do_slice(
value: "t.Collection[V]", slices: int, fill_with: "t.Optional[V]" = None
) -> "t.Iterator[t.List[V]]":
"""Slice an iterator and return a list of lists containing
those items. Useful if you want to create a div containing
three ul tags that represent columns:
.. sourcecode:: html+jinja
<div class="columnwrapper">
{%- for column in items|slice(3) %}
<ul class="column-{{ loop.index }}">
{%- for item in column %}
<li>{{ item }}</li>
{%- endfor %}
</ul>
{%- endfor %}
</div>
If you pass it a second argument it's used to fill missing
values on the last iteration.
"""
seq = list(value)
length = len(seq)
items_per_slice = length // slices
slices_with_extra = length % slices
offset = 0
for slice_number in range(slices):
start = offset + slice_number * items_per_slice
if slice_number < slices_with_extra:
offset += 1
end = offset + (slice_number + 1) * items_per_slice
tmp = seq[start:end]
if fill_with is not None and slice_number >= slices_with_extra:
tmp.append(fill_with)
yield tmp
@async_variant(sync_do_slice) # type: ignore
async def do_slice(
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
slices: int,
fill_with: t.Optional[t.Any] = None,
) -> "t.Iterator[t.List[V]]":
return sync_do_slice(await auto_to_list(value), slices, fill_with)
def do_batch(
value: "t.Iterable[V]", linecount: int, fill_with: "t.Optional[V]" = None
) -> "t.Iterator[t.List[V]]":
"""
A filter that batches items. It works pretty much like `slice`
just the other way round. It returns a list of lists with the
given number of items. If you provide a second parameter this
is used to fill up missing items. See this example:
.. sourcecode:: html+jinja
<table>
{%- for row in items|batch(3, ' ') %}
<tr>
{%- for column in row %}
<td>{{ column }}</td>
{%- endfor %}
</tr>
{%- endfor %}
</table>
"""
tmp: "t.List[V]" = []
for item in value:
if len(tmp) == linecount:
yield tmp
tmp = []
tmp.append(item)
if tmp:
if fill_with is not None and len(tmp) < linecount:
tmp += [fill_with] * (linecount - len(tmp))
yield tmp
def do_round(
value: float,
precision: int = 0,
method: 'te.Literal["common", "ceil", "floor"]' = "common",
) -> float:
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
- ``'common'`` rounds either up or down
- ``'ceil'`` always rounds up
- ``'floor'`` always rounds down
If you don't specify a method ``'common'`` is used.
.. sourcecode:: jinja
{{ 42.55|round }}
-> 43.0
{{ 42.55|round(1, 'floor') }}
-> 42.5
Note that even if rounded to 0 precision, a float is returned. If
you need a real integer, pipe it through `int`:
.. sourcecode:: jinja
{{ 42.55|round|int }}
-> 43
"""
if method not in {"common", "ceil", "floor"}:
raise FilterArgumentError("method must be common, ceil or floor")
if method == "common":
return round(value, precision)
func = getattr(math, method)
return t.cast(float, func(value * (10**precision)) / (10**precision))
class _GroupTuple(t.NamedTuple):
grouper: t.Any
list: t.List[t.Any]
# Use the regular tuple repr to hide this subclass if users print
# out the value during debugging.
def __repr__(self) -> str:
return tuple.__repr__(self)
def __str__(self) -> str:
return tuple.__str__(self)
@pass_environment
def sync_do_groupby(
environment: "Environment",
value: "t.Iterable[V]",
attribute: t.Union[str, int],
default: t.Optional[t.Any] = None,
case_sensitive: bool = False,
) -> "t.List[_GroupTuple]":
"""Group a sequence of objects by an attribute using Python's
:func:`itertools.groupby`. The attribute can use dot notation for
nested access, like ``"address.city"``. Unlike Python's ``groupby``,
the values are sorted first so only one group is returned for each
unique value.
For example, a list of ``User`` objects with a ``city`` attribute
can be rendered in groups. In this example, ``grouper`` refers to
the ``city`` value of the group.
.. sourcecode:: html+jinja
<ul>{% for city, items in users|groupby("city") %}
<li>{{ city }}
<ul>{% for user in items %}
<li>{{ user.name }}
{% endfor %}</ul>
</li>
{% endfor %}</ul>
``groupby`` yields namedtuples of ``(grouper, list)``, which
can be used instead of the tuple unpacking above. ``grouper`` is the
value of the attribute, and ``list`` is the items with that value.
.. sourcecode:: html+jinja
<ul>{% for group in users|groupby("city") %}
<li>{{ group.grouper }}: {{ group.list|join(", ") }}
{% endfor %}</ul>
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
<ul>{% for city, items in users|groupby("city", default="NY") %}
<li>{{ city }}: {{ items|map(attribute="name")|join(", ") }}</li>
{% endfor %}</ul>
Like the :func:`~jinja-filters.sort` filter, sorting and grouping is
case-insensitive by default. The ``key`` for each group will have
the case of the first item in that group of values. For example, if
a list of users has cities ``["CA", "NY", "ca"]``, the "CA" group
will have two values. This can be disabled by passing
``case_sensitive=True``.
.. versionchanged:: 3.1
Added the ``case_sensitive`` parameter. Sorting and grouping is
case-insensitive by default, matching other filters that do
comparisons.
.. versionchanged:: 3.0
Added the ``default`` parameter.
.. versionchanged:: 2.6
The attribute supports dot notation for nested access.
"""
expr = make_attrgetter(
environment,
attribute,
postprocess=ignore_case if not case_sensitive else None,
default=default,
)
out = [
_GroupTuple(key, list(values))
for key, values in groupby(sorted(value, key=expr), expr)
]
if not case_sensitive:
# Return the real key from the first value instead of the lowercase key.
output_expr = make_attrgetter(environment, attribute, default=default)
out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]
return out
@async_variant(sync_do_groupby) # type: ignore
async def do_groupby(
environment: "Environment",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
attribute: t.Union[str, int],
default: t.Optional[t.Any] = None,
case_sensitive: bool = False,
) -> "t.List[_GroupTuple]":
expr = make_attrgetter(
environment,
attribute,
postprocess=ignore_case if not case_sensitive else None,
default=default,
)
out = [
_GroupTuple(key, await auto_to_list(values))
for key, values in groupby(sorted(await auto_to_list(value), key=expr), expr)
]
if not case_sensitive:
# Return the real key from the first value instead of the lowercase key.
output_expr = make_attrgetter(environment, attribute, default=default)
out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]
return out
@pass_environment
def sync_do_sum(
environment: "Environment",
iterable: "t.Iterable[V]",
attribute: t.Optional[t.Union[str, int]] = None,
start: V = 0, # type: ignore
) -> V:
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The ``attribute`` parameter was added to allow summing up over
attributes. Also the ``start`` parameter was moved on to the right.
"""
if attribute is not None:
iterable = map(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start) # type: ignore[no-any-return, call-overload]
@async_variant(sync_do_sum) # type: ignore
async def do_sum(
environment: "Environment",
iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
attribute: t.Optional[t.Union[str, int]] = None,
start: V = 0, # type: ignore
) -> V:
rv = start
if attribute is not None:
func = make_attrgetter(environment, attribute)
else:
def func(x: V) -> V:
return x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
def sync_do_list(value: "t.Iterable[V]") -> "t.List[V]":
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
@async_variant(sync_do_list) # type: ignore
async def do_list(value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]") -> "t.List[V]":
return await auto_to_list(value)
def do_mark_safe(value: str) -> Markup:
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
def do_mark_unsafe(value: str) -> str:
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
return str(value)
@typing.overload
def do_reverse(value: str) -> str:
...
@typing.overload
def do_reverse(value: "t.Iterable[V]") -> "t.Iterable[V]":
...
def do_reverse(value: t.Union[str, t.Iterable[V]]) -> t.Union[str, t.Iterable[V]]:
"""Reverse the object or return an iterator that iterates over it the other
way round.
"""
if isinstance(value, str):
return value[::-1]
try:
return reversed(value) # type: ignore
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
except TypeError as e:
raise FilterArgumentError("argument must be iterable") from e
@pass_environment
def do_attr(
environment: "Environment", obj: t.Any, name: str
) -> t.Union[Undefined, t.Any]:
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo.bar`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed:
environment = t.cast("SandboxedEnvironment", environment)
if not environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@typing.overload
def sync_do_map(
context: "Context",
value: t.Iterable[t.Any],
name: str,
*args: t.Any,
**kwargs: t.Any,
) -> t.Iterable[t.Any]:
...
@typing.overload
def sync_do_map(
context: "Context",
value: t.Iterable[t.Any],
*,
attribute: str = ...,
default: t.Optional[t.Any] = None,
) -> t.Iterable[t.Any]:
...
@pass_context
def sync_do_map(
context: "Context", value: t.Iterable[t.Any], *args: t.Any, **kwargs: t.Any
) -> t.Iterable[t.Any]:
"""Applies a filter on a sequence of objects or looks up an attribute.
    This is useful when dealing with lists of objects but you are really
    only interested in a certain value of each of them.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
{{ users|map(attribute="username", default="Anonymous")|join(", ") }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
        Page titles: {{ titles|map('lower')|join(', ') }}
Similar to a generator comprehension such as:
.. code-block:: python
(u.username for u in users)
(getattr(u, "username", "Anonymous") for u in users)
(do_lower(x) for x in titles)
.. versionchanged:: 2.11.0
Added the ``default`` parameter.
.. versionadded:: 2.7
"""
if value:
func = prepare_map(context, args, kwargs)
for item in value:
yield func(item)
@typing.overload
def do_map(
context: "Context",
value: t.Union[t.AsyncIterable[t.Any], t.Iterable[t.Any]],
name: str,
*args: t.Any,
**kwargs: t.Any,
) -> t.Iterable[t.Any]:
...
@typing.overload
def do_map(
context: "Context",
value: t.Union[t.AsyncIterable[t.Any], t.Iterable[t.Any]],
*,
attribute: str = ...,
default: t.Optional[t.Any] = None,
) -> t.Iterable[t.Any]:
...
@async_variant(sync_do_map) # type: ignore
async def do_map(
context: "Context",
value: t.Union[t.AsyncIterable[t.Any], t.Iterable[t.Any]],
*args: t.Any,
**kwargs: t.Any,
) -> t.AsyncIterable[t.Any]:
if value:
func = prepare_map(context, args, kwargs)
async for item in auto_aiter(value):
yield await auto_await(func(item))
@pass_context
def sync_do_select(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and only selecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|select("odd") }}
{{ numbers|select("odd") }}
{{ numbers|select("divisibleby", 3) }}
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if test_odd(n))
(n for n in numbers if test_divisibleby(n, 3))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: x, False)
@async_variant(sync_do_select) # type: ignore
async def do_select(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: x, False)
@pass_context
def sync_do_reject(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and rejecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|reject("odd") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if not test_odd(n))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: not x, False)
@async_variant(sync_do_reject) # type: ignore
async def do_reject(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: not x, False)
@pass_context
def sync_do_selectattr(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and only selecting the objects with the
test succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
Example usage:
.. sourcecode:: jinja
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
        (user for user in users if user.is_active)
        (user for user in users if test_none(user.email))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: x, True)
@async_variant(sync_do_selectattr) # type: ignore
async def do_selectattr(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: x, True)
@pass_context
def sync_do_rejectattr(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and rejecting the objects with the test
succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
.. sourcecode:: jinja
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
        (user for user in users if not user.is_active)
        (user for user in users if not test_none(user.email))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: not x, True)
@async_variant(sync_do_rejectattr) # type: ignore
async def do_rejectattr(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: not x, True)
@pass_eval_context
def do_tojson(
eval_ctx: "EvalContext", value: t.Any, indent: t.Optional[int] = None
) -> Markup:
"""Serialize an object to a string of JSON, and mark it safe to
render in HTML. This filter is only for use in HTML documents.
The returned string is safe to render in HTML documents and
``<script>`` tags. The exception is in HTML attributes that are
double quoted; either use single quotes or the ``|forceescape``
filter.
:param value: The object to serialize to JSON.
:param indent: The ``indent`` parameter passed to ``dumps``, for
pretty-printing the value.
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
dumps = policies["json.dumps_function"]
kwargs = policies["json.dumps_kwargs"]
if indent is not None:
kwargs = kwargs.copy()
kwargs["indent"] = indent
return htmlsafe_json_dumps(value, dumps=dumps, **kwargs)
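# Example (illustrative): the default policies escape HTML-sensitive
# characters into ``\uNNNN`` sequences, so
#
#     {{ {"msg": "<hi>"}|tojson }} -> {"msg": "\u003chi\u003e"}
#
# is safe to embed in a ``<script>`` block.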
def prepare_map(
context: "Context", args: t.Tuple[t.Any, ...], kwargs: t.Dict[str, t.Any]
) -> t.Callable[[t.Any], t.Any]:
if not args and "attribute" in kwargs:
attribute = kwargs.pop("attribute")
default = kwargs.pop("default", None)
if kwargs:
raise FilterArgumentError(
f"Unexpected keyword argument {next(iter(kwargs))!r}"
)
func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
name = args[0]
args = args[1:]
except LookupError:
raise FilterArgumentError("map requires a filter argument") from None
def func(item: t.Any) -> t.Any:
return context.environment.call_filter(
name, item, args, kwargs, context=context
)
return func
def prepare_select_or_reject(
context: "Context",
args: t.Tuple[t.Any, ...],
kwargs: t.Dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> t.Callable[[t.Any], t.Any]:
if lookup_attr:
try:
attr = args[0]
except LookupError:
raise FilterArgumentError("Missing parameter for attribute name") from None
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
def transfunc(x: V) -> V:
return x
try:
name = args[off]
args = args[1 + off :]
def func(item: t.Any) -> t.Any:
return context.environment.call_test(name, item, args, kwargs)
except LookupError:
func = bool # type: ignore
return lambda item: modfunc(func(transfunc(item)))
def select_or_reject(
context: "Context",
value: "t.Iterable[V]",
args: t.Tuple[t.Any, ...],
kwargs: t.Dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> "t.Iterator[V]":
if value:
func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
for item in value:
if func(item):
yield item
async def async_select_or_reject(
context: "Context",
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
args: t.Tuple[t.Any, ...],
kwargs: t.Dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> "t.AsyncIterator[V]":
if value:
func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
async for item in auto_aiter(value):
if func(item):
yield item
FILTERS = {
"abs": abs,
"attr": do_attr,
"batch": do_batch,
"capitalize": do_capitalize,
"center": do_center,
"count": len,
"d": do_default,
"default": do_default,
"dictsort": do_dictsort,
"e": escape,
"escape": escape,
"filesizeformat": do_filesizeformat,
"first": do_first,
"float": do_float,
"forceescape": do_forceescape,
"format": do_format,
"groupby": do_groupby,
"indent": do_indent,
"int": do_int,
"join": do_join,
"last": do_last,
"length": len,
"list": do_list,
"lower": do_lower,
"items": do_items,
"map": do_map,
"min": do_min,
"max": do_max,
"pprint": do_pprint,
"random": do_random,
"reject": do_reject,
"rejectattr": do_rejectattr,
"replace": do_replace,
"reverse": do_reverse,
"round": do_round,
"safe": do_mark_safe,
"select": do_select,
"selectattr": do_selectattr,
"slice": do_slice,
"sort": do_sort,
"string": soft_str,
"striptags": do_striptags,
"sum": do_sum,
"title": do_title,
"trim": do_trim,
"truncate": do_truncate,
"unique": do_unique,
"upper": do_upper,
"urlencode": do_urlencode,
"urlize": do_urlize,
"wordcount": do_wordcount,
"wordwrap": do_wordwrap,
"xmlattr": do_xmlattr,
"tojson": do_tojson,
}
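# Usage sketch (illustrative assumption, not part of the module): the filters
# above are normally exercised through templates rather than called directly.
if __name__ == "__main__":
    from jinja2 import Environment
    env = Environment()
    tmpl = env.from_string(
        "{{ users|selectattr('active')|map(attribute='name')|join(', ') }}"
    )
    users = [
        {"name": "alice", "active": True},
        {"name": "bob", "active": False},
        {"name": "carol", "active": True},
    ]
    print(tmpl.render(users=users))  # alice, carol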
| 53,707 | 28.110027 | 87 | py |
jinja | jinja-main/src/jinja2/debug.py | import sys
import typing as t
from types import CodeType
from types import TracebackType
from .exceptions import TemplateSyntaxError
from .utils import internal_code
from .utils import missing
if t.TYPE_CHECKING:
from .runtime import Context
def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
"""Rewrite the current exception to replace any tracebacks from
within compiled template code with tracebacks that look like they
came from the template source.
This must be called within an ``except`` block.
:param source: For ``TemplateSyntaxError``, the original source if
known.
:return: The original exception with the rewritten traceback.
"""
_, exc_value, tb = sys.exc_info()
exc_value = t.cast(BaseException, exc_value)
tb = t.cast(TracebackType, tb)
if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
exc_value.translated = True
exc_value.source = source
# Remove the old traceback, otherwise the frames from the
# compiler still show up.
exc_value.with_traceback(None)
# Outside of runtime, so the frame isn't executing template
# code, but it still needs to point at the template.
tb = fake_traceback(
exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
)
else:
# Skip the frame for the render function.
tb = tb.tb_next
stack = []
# Build the stack of traceback object, replacing any in template
# code with the source file and line information.
while tb is not None:
# Skip frames decorated with @internalcode. These are internal
# calls that aren't useful in template debugging output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
template = tb.tb_frame.f_globals.get("__jinja_template__")
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
stack.append(fake_tb)
else:
stack.append(tb)
tb = tb.tb_next
tb_next = None
# Assign tb_next in reverse to avoid circular references.
for tb in reversed(stack):
tb.tb_next = tb_next
tb_next = tb
return exc_value.with_traceback(tb_next)
def fake_traceback( # type: ignore
exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
) -> TracebackType:
"""Produce a new traceback object that looks like it came from the
template source instead of the compiled code. The filename, line
number, and location name will point to the template, and the local
variables will be the current template context.
:param exc_value: The original exception to be re-raised to create
the new traceback.
:param tb: The original traceback to get the local variables and
code info from.
:param filename: The template filename.
:param lineno: The line number in the template source.
"""
if tb is not None:
# Replace the real locals with the context that would be
# available at that point in the template.
locals = get_template_locals(tb.tb_frame.f_locals)
locals.pop("__jinja_exception__", None)
else:
locals = {}
globals = {
"__name__": filename,
"__file__": filename,
"__jinja_exception__": exc_value,
}
# Raise an exception at the correct line number.
code: CodeType = compile(
"\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec"
)
# Build a new code object that points to the template file and
# replaces the location with a block name.
location = "template"
if tb is not None:
function = tb.tb_frame.f_code.co_name
if function == "root":
location = "top-level template code"
elif function.startswith("block_"):
location = f"block {function[6:]!r}"
if sys.version_info >= (3, 8):
code = code.replace(co_name=location)
else:
code = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
location,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
# Execute the new code, which is guaranteed to raise, and return
# the new traceback without this frame.
try:
exec(code, globals, locals)
except BaseException:
return sys.exc_info()[2].tb_next # type: ignore
def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]:
"""Based on the runtime locals, get the context that would be
available at that point in the template.
"""
# Start with the current template context.
ctx: "t.Optional[Context]" = real_locals.get("context")
if ctx is not None:
data: t.Dict[str, t.Any] = ctx.get_all().copy()
else:
data = {}
# Might be in a derived context that only sets local variables
# rather than pushing a context. Local variables follow the scheme
# l_depth_name. Find the highest-depth local that has a value for
# each name.
local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {}
for name, value in real_locals.items():
if not name.startswith("l_") or value is missing:
# Not a template variable, or no longer relevant.
continue
try:
_, depth_str, name = name.split("_", 2)
depth = int(depth_str)
except ValueError:
continue
cur_depth = local_overrides.get(name, (-1,))[0]
if cur_depth < depth:
local_overrides[name] = (depth, value)
# Modify the context with any derived context.
for name, (_, value) in local_overrides.items():
if value is missing:
data.pop(name, None)
else:
data[name] = value
return data
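# Usage sketch (illustrative assumption, not part of the module): after the
# rewrite, traceback frames point at the template source instead of the
# compiled code.
if __name__ == "__main__":
    import traceback
    from jinja2 import Environment
    env = Environment()
    tmpl = env.from_string("line one\n{{ 1 // 0 }}")
    try:
        tmpl.render()
    except ZeroDivisionError:
        # The printed traceback contains 'File "<template>", line 2'.
        traceback.print_exc()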
| 6,299 | 31.8125 | 87 | py |
jinja | jinja-main/src/jinja2/optimizer.py | """The optimizer tries to constant fold expressions and modify the AST
in place so that it should be faster to evaluate.
Because the AST does not contain all the scoping information and the
compiler has to find that out, we cannot do all the optimizations we
want. For example, loop unrolling doesn't work because unrolled loops
would have a different scope. The solution would be a second syntax tree
that stored the scoping rules.
"""
import typing as t
from . import nodes
from .visitor import NodeTransformer
if t.TYPE_CHECKING:
from .environment import Environment
def optimize(node: nodes.Node, environment: "Environment") -> nodes.Node:
    """The context hint can be used to perform a static optimization
    based on the context given."""
optimizer = Optimizer(environment)
return t.cast(nodes.Node, optimizer.visit(node))
class Optimizer(NodeTransformer):
def __init__(self, environment: "t.Optional[Environment]") -> None:
self.environment = environment
def generic_visit(
self, node: nodes.Node, *args: t.Any, **kwargs: t.Any
) -> nodes.Node:
node = super().generic_visit(node, *args, **kwargs)
# Do constant folding. Some other nodes besides Expr have
# as_const, but folding them causes errors later on.
if isinstance(node, nodes.Expr):
try:
return nodes.Const.from_untrusted(
node.as_const(args[0] if args else None),
lineno=node.lineno,
environment=self.environment,
)
except nodes.Impossible:
pass
return node
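# Minimal usage sketch (editorial addition, assumes a default Environment):
# constant expressions fold into ``Const`` nodes in place.
#
#     >>> from jinja2 import Environment
#     >>> env = Environment()
#     >>> tree = env.parse("{{ 1 + 2 * 3 }}")
#     >>> tree = optimize(tree, env)  # the Add/Mul subtree collapses to Const(7)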
| 1,650 | 33.395833 | 73 | py |
jinja | jinja-main/scripts/generate_identifier_pattern.py | import itertools
import os
import re
import sys
def get_characters():
"""Find every Unicode character that is valid in a Python `identifier`_ but
is not matched by the regex ``\\w`` group.
``\\w`` matches some characters that aren't valid in identifiers, but
:meth:`str.isidentifier` will catch that later in lexing.
All start characters are valid continue characters, so we only test for
continue characters.
    .. _identifier: https://docs.python.org/3/reference/lexical_analysis.html#identifiers
"""
for cp in range(sys.maxunicode + 1):
s = chr(cp)
if ("a" + s).isidentifier() and not re.match(r"\w", s):
yield s
def collapse_ranges(data):
"""Given a sorted list of unique characters, generate ranges representing
sequential code points.
Source: https://stackoverflow.com/a/4629241/400617
"""
for _, g in itertools.groupby(enumerate(data), lambda x: ord(x[1]) - x[0]):
lb = list(g)
yield lb[0][1], lb[-1][1]
def build_pattern(ranges):
"""Output the regex pattern for ranges of characters.
One and two character ranges output the individual characters.
"""
out = []
for a, b in ranges:
if a == b: # single char
out.append(a)
elif ord(b) - ord(a) == 1: # two chars, range is redundant
out.append(a)
out.append(b)
else:
out.append(f"{a}-{b}")
return "".join(out)
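# Worked example (editorial sketch): three runs of consecutive code points
# collapse into ranges, and one- or two-character ranges are emitted as
# literal characters rather than a redundant range.
#
#     >>> list(collapse_ranges(["a", "b", "c", "e", "x"]))
#     [('a', 'c'), ('e', 'e'), ('x', 'x')]
#     >>> build_pattern([("a", "c"), ("e", "e"), ("x", "y")])
#     'a-cexy'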
def main():
"""Build the regex pattern and write it to
``jinja2/_identifier.py``.
"""
pattern = build_pattern(collapse_ranges(get_characters()))
filename = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "src", "jinja2", "_identifier.py")
)
with open(filename, "w", encoding="utf8") as f:
f.write("import re\n\n")
f.write("# generated by scripts/generate_identifier_pattern.py\n")
f.write("pattern = re.compile(\n")
f.write(f' r"[\\w{pattern}]+" # noqa: B950\n')
f.write(")\n")
if __name__ == "__main__":
main()
| 2,085 | 26.813333 | 88 | py |
jinja | jinja-main/tests/test_inheritance.py | import pytest
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import TemplateRuntimeError
from jinja2 import TemplateSyntaxError
LAYOUTTEMPLATE = """\
|{% block block1 %}block 1 from layout{% endblock %}
|{% block block2 %}block 2 from layout{% endblock %}
|{% block block3 %}
{% block block4 %}nested block 4 from layout{% endblock %}
{% endblock %}|"""
LEVEL1TEMPLATE = """\
{% extends "layout" %}
{% block block1 %}block 1 from level1{% endblock %}"""
LEVEL2TEMPLATE = """\
{% extends "level1" %}
{% block block2 %}{% block block5 %}nested block 5 from level2{%
endblock %}{% endblock %}"""
LEVEL3TEMPLATE = """\
{% extends "level2" %}
{% block block5 %}block 5 from level3{% endblock %}
{% block block4 %}block 4 from level3{% endblock %}
"""
LEVEL4TEMPLATE = """\
{% extends "level3" %}
{% block block3 %}block 3 from level4{% endblock %}
"""
WORKINGTEMPLATE = """\
{% extends "layout" %}
{% block block1 %}
{% if false %}
{% block block2 %}
this should work
{% endblock %}
{% endif %}
{% endblock %}
"""
DOUBLEEXTENDS = """\
{% extends "layout" %}
{% extends "layout" %}
{% block block1 %}
{% if false %}
{% block block2 %}
this should work
{% endblock %}
{% endif %}
{% endblock %}
"""
@pytest.fixture
def env():
return Environment(
loader=DictLoader(
{
"layout": LAYOUTTEMPLATE,
"level1": LEVEL1TEMPLATE,
"level2": LEVEL2TEMPLATE,
"level3": LEVEL3TEMPLATE,
"level4": LEVEL4TEMPLATE,
"working": WORKINGTEMPLATE,
"doublee": DOUBLEEXTENDS,
}
),
trim_blocks=True,
)
class TestInheritance:
def test_layout(self, env):
tmpl = env.get_template("layout")
assert tmpl.render() == (
"|block 1 from layout|block 2 from layout|nested block 4 from layout|"
)
def test_level1(self, env):
tmpl = env.get_template("level1")
assert tmpl.render() == (
"|block 1 from level1|block 2 from layout|nested block 4 from layout|"
)
def test_level2(self, env):
tmpl = env.get_template("level2")
assert tmpl.render() == (
"|block 1 from level1|nested block 5 from "
"level2|nested block 4 from layout|"
)
def test_level3(self, env):
tmpl = env.get_template("level3")
assert tmpl.render() == (
"|block 1 from level1|block 5 from level3|block 4 from level3|"
)
def test_level4(self, env):
tmpl = env.get_template("level4")
assert tmpl.render() == (
"|block 1 from level1|block 5 from level3|block 3 from level4|"
)
def test_super(self, env):
env = Environment(
loader=DictLoader(
{
"a": "{% block intro %}INTRO{% endblock %}|"
"BEFORE|{% block data %}INNER{% endblock %}|AFTER",
"b": '{% extends "a" %}{% block data %}({{ '
"super() }}){% endblock %}",
"c": '{% extends "b" %}{% block intro %}--{{ '
"super() }}--{% endblock %}\n{% block data "
"%}[{{ super() }}]{% endblock %}",
}
)
)
tmpl = env.get_template("c")
assert tmpl.render() == "--INTRO--|BEFORE|[(INNER)]|AFTER"
def test_working(self, env):
env.get_template("working")
def test_reuse_blocks(self, env):
tmpl = env.from_string(
"{{ self.foo() }}|{% block foo %}42{% endblock %}|{{ self.foo() }}"
)
assert tmpl.render() == "42|42|42"
def test_preserve_blocks(self, env):
env = Environment(
loader=DictLoader(
{
"a": "{% if false %}{% block x %}A{% endblock %}"
"{% endif %}{{ self.x() }}",
"b": '{% extends "a" %}{% block x %}B{{ super() }}{% endblock %}',
}
)
)
tmpl = env.get_template("b")
assert tmpl.render() == "BA"
def test_dynamic_inheritance(self, env):
env = Environment(
loader=DictLoader(
{
"default1": "DEFAULT1{% block x %}{% endblock %}",
"default2": "DEFAULT2{% block x %}{% endblock %}",
"child": "{% extends default %}{% block x %}CHILD{% endblock %}",
}
)
)
tmpl = env.get_template("child")
for m in range(1, 3):
assert tmpl.render(default=f"default{m}") == f"DEFAULT{m}CHILD"
def test_multi_inheritance(self, env):
env = Environment(
loader=DictLoader(
{
"default1": "DEFAULT1{% block x %}{% endblock %}",
"default2": "DEFAULT2{% block x %}{% endblock %}",
"child": (
"{% if default %}{% extends default %}{% else %}"
"{% extends 'default1' %}{% endif %}"
"{% block x %}CHILD{% endblock %}"
),
}
)
)
tmpl = env.get_template("child")
assert tmpl.render(default="default2") == "DEFAULT2CHILD"
assert tmpl.render(default="default1") == "DEFAULT1CHILD"
assert tmpl.render() == "DEFAULT1CHILD"
def test_scoped_block(self, env):
env = Environment(
loader=DictLoader(
{
"default.html": "{% for item in seq %}[{% block item scoped %}"
"{% endblock %}]{% endfor %}"
}
)
)
t = env.from_string(
"{% extends 'default.html' %}{% block item %}{{ item }}{% endblock %}"
)
assert t.render(seq=list(range(5))) == "[0][1][2][3][4]"
def test_super_in_scoped_block(self, env):
env = Environment(
loader=DictLoader(
{
"default.html": "{% for item in seq %}[{% block item scoped %}"
"{{ item }}{% endblock %}]{% endfor %}"
}
)
)
t = env.from_string(
'{% extends "default.html" %}{% block item %}'
"{{ super() }}|{{ item * 2 }}{% endblock %}"
)
assert t.render(seq=list(range(5))) == "[0|0][1|2][2|4][3|6][4|8]"
def test_scoped_block_after_inheritance(self, env):
env = Environment(
loader=DictLoader(
{
"layout.html": """
{% block useless %}{% endblock %}
""",
"index.html": """
{%- extends 'layout.html' %}
{% from 'helpers.html' import foo with context %}
{% block useless %}
{% for x in [1, 2, 3] %}
{% block testing scoped %}
{{ foo(x) }}
{% endblock %}
{% endfor %}
{% endblock %}
""",
"helpers.html": """
{% macro foo(x) %}{{ the_foo + x }}{% endmacro %}
""",
}
)
)
rv = env.get_template("index.html").render(the_foo=42).split()
assert rv == ["43", "44", "45"]
def test_level1_required(self, env):
env = Environment(
loader=DictLoader(
{
"default": "{% block x required %}{# comment #}\n {% endblock %}",
"level1": "{% extends 'default' %}{% block x %}[1]{% endblock %}",
}
)
)
rv = env.get_template("level1").render()
assert rv == "[1]"
def test_level2_required(self, env):
env = Environment(
loader=DictLoader(
{
"default": "{% block x required %}{% endblock %}",
"level1": "{% extends 'default' %}{% block x %}[1]{% endblock %}",
"level2": "{% extends 'default' %}{% block x %}[2]{% endblock %}",
}
)
)
rv1 = env.get_template("level1").render()
rv2 = env.get_template("level2").render()
assert rv1 == "[1]"
assert rv2 == "[2]"
def test_level3_required(self, env):
env = Environment(
loader=DictLoader(
{
"default": "{% block x required %}{% endblock %}",
"level1": "{% extends 'default' %}",
"level2": "{% extends 'level1' %}{% block x %}[2]{% endblock %}",
"level3": "{% extends 'level2' %}",
}
)
)
t1 = env.get_template("level1")
t2 = env.get_template("level2")
t3 = env.get_template("level3")
with pytest.raises(TemplateRuntimeError, match="Required block 'x' not found"):
assert t1.render()
assert t2.render() == "[2]"
assert t3.render() == "[2]"
def test_invalid_required(self, env):
env = Environment(
loader=DictLoader(
{
"empty": "{% block x required %}{% endblock %}",
"blank": "{% block x required %} {# c #}{% endblock %}",
"text": "{% block x required %}data {# c #}{% endblock %}",
"block": "{% block x required %}{% block y %}"
"{% endblock %}{% endblock %}",
"if": "{% block x required %}{% if true %}"
"{% endif %}{% endblock %}",
"top": "{% extends t %}{% block x %}CHILD{% endblock %}",
}
)
)
t = env.get_template("top")
assert t.render(t="empty") == "CHILD"
assert t.render(t="blank") == "CHILD"
required_block_check = pytest.raises(
TemplateSyntaxError,
match="Required blocks can only contain comments or whitespace",
)
with required_block_check:
t.render(t="text")
with required_block_check:
t.render(t="block")
with required_block_check:
t.render(t="if")
def test_required_with_scope(self, env):
env = Environment(
loader=DictLoader(
{
"default1": "{% for item in seq %}[{% block item scoped required %}"
"{% endblock %}]{% endfor %}",
"child1": "{% extends 'default1' %}{% block item %}"
"{{ item }}{% endblock %}",
"default2": "{% for item in seq %}[{% block item required scoped %}"
"{% endblock %}]{% endfor %}",
"child2": "{% extends 'default2' %}{% block item %}"
"{{ item }}{% endblock %}",
}
)
)
t1 = env.get_template("child1")
t2 = env.get_template("child2")
assert t1.render(seq=list(range(3))) == "[0][1][2]"
# scoped must come before required
with pytest.raises(TemplateSyntaxError):
t2.render(seq=list(range(3)))
def test_duplicate_required_or_scoped(self, env):
env = Environment(
loader=DictLoader(
{
"default1": "{% for item in seq %}[{% block item "
"scoped scoped %}}{{% endblock %}}]{{% endfor %}}",
"default2": "{% for item in seq %}[{% block item "
"required required %}}{{% endblock %}}]{{% endfor %}}",
"child": "{% if default %}{% extends default %}{% else %}"
"{% extends 'default1' %}{% endif %}{%- block x %}"
"CHILD{% endblock %}",
}
)
)
tmpl = env.get_template("child")
with pytest.raises(TemplateSyntaxError):
tmpl.render(default="default1", seq=list(range(3)))
with pytest.raises(TemplateSyntaxError):
tmpl.render(default="default2", seq=list(range(3)))
class TestBugFix:
def test_fixed_macro_scoping_bug(self, env):
assert (
Environment(
loader=DictLoader(
{
"test.html": """\
{% extends 'details.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block inner_box %}
{{ my_macro() }}
{% endblock %}
""",
"details.html": """\
{% extends 'standard.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block content %}
{% block outer_box %}
outer_box
{% block inner_box %}
inner_box
{% endblock %}
{% endblock %}
{% endblock %}
""",
"standard.html": """
{% block content %} {% endblock %}
""",
}
)
)
.get_template("test.html")
.render()
.split()
== ["outer_box", "my_macro"]
)
def test_double_extends(self, env):
"""Ensures that a template with more than 1 {% extends ... %} usage
raises a ``TemplateError``.
"""
with pytest.raises(TemplateRuntimeError, match="extended multiple times"):
env.get_template("doublee").render()
| 13,611 | 31.642686 | 88 | py |
jinja | jinja-main/tests/test_idtracking.py | from jinja2 import nodes
from jinja2.idtracking import symbols_for_node
def test_basics():
for_loop = nodes.For(
nodes.Name("foo", "store"),
nodes.Name("seq", "load"),
[nodes.Output([nodes.Name("foo", "load")])],
[],
None,
False,
)
tmpl = nodes.Template(
[nodes.Assign(nodes.Name("foo", "store"), nodes.Name("bar", "load")), for_loop]
)
sym = symbols_for_node(tmpl)
assert sym.refs == {
"foo": "l_0_foo",
"bar": "l_0_bar",
"seq": "l_0_seq",
}
assert sym.loads == {
"l_0_foo": ("undefined", None),
"l_0_bar": ("resolve", "bar"),
"l_0_seq": ("resolve", "seq"),
}
sym = symbols_for_node(for_loop, sym)
assert sym.refs == {
"foo": "l_1_foo",
}
assert sym.loads == {
"l_1_foo": ("param", None),
}
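    # (Editorial note) The "l_0_foo" / "l_1_foo" names asserted above follow
    # the code generator's l_<depth>_<name> scheme: depth 0 is the template's
    # root frame and each nested scope, such as the for loop, increments it.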
def test_complex():
title_block = nodes.Block(
"title", [nodes.Output([nodes.TemplateData("Page Title")])], False, False
)
render_title_macro = nodes.Macro(
"render_title",
[nodes.Name("title", "param")],
[],
[
nodes.Output(
[
nodes.TemplateData('\n <div class="title">\n <h1>'),
nodes.Name("title", "load"),
nodes.TemplateData("</h1>\n <p>"),
nodes.Name("subtitle", "load"),
nodes.TemplateData("</p>\n "),
]
),
nodes.Assign(
nodes.Name("subtitle", "store"), nodes.Const("something else")
),
nodes.Output(
[
nodes.TemplateData("\n <p>"),
nodes.Name("subtitle", "load"),
nodes.TemplateData("</p>\n </div>\n"),
nodes.If(
nodes.Name("something", "load"),
[
nodes.Assign(
nodes.Name("title_upper", "store"),
nodes.Filter(
nodes.Name("title", "load"),
"upper",
[],
[],
None,
None,
),
),
nodes.Output(
[
nodes.Name("title_upper", "load"),
nodes.Call(
nodes.Name("render_title", "load"),
[nodes.Const("Aha")],
[],
None,
None,
),
]
),
],
[],
[],
),
]
),
],
)
for_loop = nodes.For(
nodes.Name("item", "store"),
nodes.Name("seq", "load"),
[
nodes.Output(
[
nodes.TemplateData("\n <li>"),
nodes.Name("item", "load"),
nodes.TemplateData("</li>\n <span>"),
]
),
nodes.Include(nodes.Const("helper.html"), True, False),
nodes.Output([nodes.TemplateData("</span>\n ")]),
],
[],
None,
False,
)
body_block = nodes.Block(
"body",
[
nodes.Output(
[
nodes.TemplateData("\n "),
nodes.Call(
nodes.Name("render_title", "load"),
[nodes.Name("item", "load")],
[],
None,
None,
),
nodes.TemplateData("\n <ul>\n "),
]
),
for_loop,
nodes.Output([nodes.TemplateData("\n </ul>\n")]),
],
False,
False,
)
tmpl = nodes.Template(
[
nodes.Extends(nodes.Const("layout.html")),
title_block,
render_title_macro,
body_block,
]
)
tmpl_sym = symbols_for_node(tmpl)
assert tmpl_sym.refs == {
"render_title": "l_0_render_title",
}
assert tmpl_sym.loads == {
"l_0_render_title": ("undefined", None),
}
assert tmpl_sym.stores == {"render_title"}
assert tmpl_sym.dump_stores() == {
"render_title": "l_0_render_title",
}
macro_sym = symbols_for_node(render_title_macro, tmpl_sym)
assert macro_sym.refs == {
"subtitle": "l_1_subtitle",
"something": "l_1_something",
"title": "l_1_title",
"title_upper": "l_1_title_upper",
}
assert macro_sym.loads == {
"l_1_subtitle": ("resolve", "subtitle"),
"l_1_something": ("resolve", "something"),
"l_1_title": ("param", None),
"l_1_title_upper": ("resolve", "title_upper"),
}
assert macro_sym.stores == {"title", "title_upper", "subtitle"}
assert macro_sym.find_ref("render_title") == "l_0_render_title"
assert macro_sym.dump_stores() == {
"title": "l_1_title",
"title_upper": "l_1_title_upper",
"subtitle": "l_1_subtitle",
"render_title": "l_0_render_title",
}
body_sym = symbols_for_node(body_block)
assert body_sym.refs == {
"item": "l_0_item",
"seq": "l_0_seq",
"render_title": "l_0_render_title",
}
assert body_sym.loads == {
"l_0_item": ("resolve", "item"),
"l_0_seq": ("resolve", "seq"),
"l_0_render_title": ("resolve", "render_title"),
}
assert body_sym.stores == set()
for_sym = symbols_for_node(for_loop, body_sym)
assert for_sym.refs == {
"item": "l_1_item",
}
assert for_sym.loads == {
"l_1_item": ("param", None),
}
assert for_sym.stores == {"item"}
assert for_sym.dump_stores() == {
"item": "l_1_item",
}
def test_if_branching_stores():
tmpl = nodes.Template(
[
nodes.If(
nodes.Name("expression", "load"),
[nodes.Assign(nodes.Name("variable", "store"), nodes.Const(42))],
[],
[],
)
]
)
sym = symbols_for_node(tmpl)
assert sym.refs == {"variable": "l_0_variable", "expression": "l_0_expression"}
assert sym.stores == {"variable"}
assert sym.loads == {
"l_0_variable": ("resolve", "variable"),
"l_0_expression": ("resolve", "expression"),
}
assert sym.dump_stores() == {
"variable": "l_0_variable",
}
def test_if_branching_stores_undefined():
tmpl = nodes.Template(
[
nodes.Assign(nodes.Name("variable", "store"), nodes.Const(23)),
nodes.If(
nodes.Name("expression", "load"),
[nodes.Assign(nodes.Name("variable", "store"), nodes.Const(42))],
[],
[],
),
]
)
sym = symbols_for_node(tmpl)
assert sym.refs == {"variable": "l_0_variable", "expression": "l_0_expression"}
assert sym.stores == {"variable"}
assert sym.loads == {
"l_0_variable": ("undefined", None),
"l_0_expression": ("resolve", "expression"),
}
assert sym.dump_stores() == {
"variable": "l_0_variable",
}
def test_if_branching_multi_scope():
for_loop = nodes.For(
nodes.Name("item", "store"),
nodes.Name("seq", "load"),
[
nodes.If(
nodes.Name("expression", "load"),
[nodes.Assign(nodes.Name("x", "store"), nodes.Const(42))],
[],
[],
),
nodes.Include(nodes.Const("helper.html"), True, False),
],
[],
None,
False,
)
tmpl = nodes.Template(
[nodes.Assign(nodes.Name("x", "store"), nodes.Const(23)), for_loop]
)
tmpl_sym = symbols_for_node(tmpl)
for_sym = symbols_for_node(for_loop, tmpl_sym)
assert for_sym.stores == {"item", "x"}
assert for_sym.loads == {
"l_1_x": ("alias", "l_0_x"),
"l_1_item": ("param", None),
"l_1_expression": ("resolve", "expression"),
}
| 8,653 | 28.738832 | 87 | py |
jinja | jinja-main/tests/test_lexnparse.py | import pytest
from jinja2 import Environment
from jinja2 import nodes
from jinja2 import Template
from jinja2 import TemplateSyntaxError
from jinja2 import UndefinedError
from jinja2.lexer import Token
from jinja2.lexer import TOKEN_BLOCK_BEGIN
from jinja2.lexer import TOKEN_BLOCK_END
from jinja2.lexer import TOKEN_EOF
from jinja2.lexer import TokenStream
class TestTokenStream:
test_tokens = [
Token(1, TOKEN_BLOCK_BEGIN, ""),
Token(2, TOKEN_BLOCK_END, ""),
]
def test_simple(self, env):
ts = TokenStream(self.test_tokens, "foo", "bar")
assert ts.current.type is TOKEN_BLOCK_BEGIN
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_BLOCK_END
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_EOF
assert not bool(ts)
assert bool(ts.eos)
def test_iter(self, env):
token_types = [t.type for t in TokenStream(self.test_tokens, "foo", "bar")]
assert token_types == [
"block_begin",
"block_end",
]
class TestLexer:
def test_raw1(self, env):
tmpl = env.from_string(
"{% raw %}foo{% endraw %}|"
"{%raw%}{{ bar }}|{% baz %}{% endraw %}"
)
assert tmpl.render() == "foo|{{ bar }}|{% baz %}"
def test_raw2(self, env):
tmpl = env.from_string("1 {%- raw -%} 2 {%- endraw -%} 3")
assert tmpl.render() == "123"
def test_raw3(self, env):
# The second newline after baz exists because it is AFTER the
# {% raw %} and is ignored.
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string("bar\n{% raw %}\n {{baz}}2 spaces\n{% endraw %}\nfoo")
assert tmpl.render(baz="test") == "bar\n\n {{baz}}2 spaces\nfoo"
def test_raw4(self, env):
# The trailing dash of the {% raw -%} cleans both the spaces and
# newlines up to the first character of data.
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
"bar\n{%- raw -%}\n\n \n 2 spaces\n space{%- endraw -%}\nfoo"
)
assert tmpl.render() == "bar2 spaces\n spacefoo"
def test_balancing(self, env):
env = Environment("{%", "%}", "${", "}")
tmpl = env.from_string(
"""{% for item in seq
%}${{'foo': item}|upper}{% endfor %}"""
)
assert tmpl.render(seq=list(range(3))) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}"
def test_comments(self, env):
env = Environment("<!--", "-->", "{", "}")
tmpl = env.from_string(
"""\
<ul>
<!--- for item in seq -->
<li>{item}</li>
<!--- endfor -->
</ul>"""
)
assert tmpl.render(seq=list(range(3))) == (
"<ul>\n <li>0</li>\n <li>1</li>\n <li>2</li>\n</ul>"
)
def test_string_escapes(self, env):
for char in "\0", "\u2668", "\xe4", "\t", "\r", "\n":
tmpl = env.from_string(f"{{{{ {char!r} }}}}")
assert tmpl.render() == char
assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == "\u2668"
def test_bytefallback(self, env):
from pprint import pformat
tmpl = env.from_string("""{{ 'foo'|pprint }}|{{ 'bär'|pprint }}""")
assert tmpl.render() == pformat("foo") + "|" + pformat("bär")
def test_operators(self, env):
from jinja2.lexer import operators
for test, expect in operators.items():
if test in "([{}])":
continue
stream = env.lexer.tokenize(f"{{{{ {test} }}}}")
next(stream)
assert stream.current.type == expect
def test_normalizing(self, env):
for seq in "\r", "\r\n", "\n":
env = Environment(newline_sequence=seq)
tmpl = env.from_string("1\n2\r\n3\n4\n")
result = tmpl.render()
assert result.replace(seq, "X") == "1X2X3X4"
def test_trailing_newline(self, env):
for keep in [True, False]:
env = Environment(keep_trailing_newline=keep)
for template, expected in [
("", {}),
("no\nnewline", {}),
("with\nnewline\n", {False: "with\nnewline"}),
("with\nseveral\n\n\n", {False: "with\nseveral\n\n"}),
]:
tmpl = env.from_string(template)
expect = expected.get(keep, template)
result = tmpl.render()
assert result == expect, (keep, template, result, expect)
@pytest.mark.parametrize(
("name", "valid"),
[
("foo", True),
("föö", True),
("き", True),
("_", True),
("1a", False), # invalid ascii start
("a-", False), # invalid ascii continue
("\U0001f40da", False), # invalid unicode start
("a🐍\U0001f40d", False), # invalid unicode continue
# start characters not matched by \w
("\u1885", True),
("\u1886", True),
("\u2118", True),
("\u212e", True),
# continue character not matched by \w
("\xb7", False),
("a\xb7", True),
],
)
def test_name(self, env, name, valid):
t = "{{ " + name + " }}"
if valid:
# valid for version being tested, shouldn't raise
env.from_string(t)
else:
pytest.raises(TemplateSyntaxError, env.from_string, t)
def test_lineno_with_strip(self, env):
tokens = env.lex(
"""\
<html>
<body>
{%- block content -%}
<hr>
{{ item }}
{% endblock %}
</body>
</html>"""
)
for tok in tokens:
lineno, token_type, value = tok
if token_type == "name" and value == "item":
assert lineno == 5
break
class TestParser:
def test_php_syntax(self, env):
env = Environment("<?", "?>", "<?=", "?>", "<!--", "-->")
tmpl = env.from_string(
"""\
<!-- I'm a comment, I'm not interesting -->\
<? for item in seq -?>
<?= item ?>
<?- endfor ?>"""
)
assert tmpl.render(seq=list(range(5))) == "01234"
def test_erb_syntax(self, env):
env = Environment("<%", "%>", "<%=", "%>", "<%#", "%>")
tmpl = env.from_string(
"""\
<%# I'm a comment, I'm not interesting %>\
<% for item in seq -%>
<%= item %>
<%- endfor %>"""
)
assert tmpl.render(seq=list(range(5))) == "01234"
def test_comment_syntax(self, env):
env = Environment("<!--", "-->", "${", "}", "<!--#", "-->")
tmpl = env.from_string(
"""\
<!--# I'm a comment, I'm not interesting -->\
<!-- for item in seq --->
${item}
<!--- endfor -->"""
)
assert tmpl.render(seq=list(range(5))) == "01234"
def test_balancing(self, env):
tmpl = env.from_string("""{{{'foo':'bar'}.foo}}""")
assert tmpl.render() == "bar"
def test_start_comment(self, env):
tmpl = env.from_string(
"""{# foo comment
and bar comment #}
{% macro blub() %}foo{% endmacro %}
{{ blub() }}"""
)
assert tmpl.render().strip() == "foo"
def test_line_syntax(self, env):
env = Environment("<%", "%>", "${", "}", "<%#", "%>", "%")
tmpl = env.from_string(
"""\
<%# regular comment %>
% for item in seq:
${item}
% endfor"""
)
assert [
int(x.strip()) for x in tmpl.render(seq=list(range(5))).split()
] == list(range(5))
env = Environment("<%", "%>", "${", "}", "<%#", "%>", "%", "##")
tmpl = env.from_string(
"""\
<%# regular comment %>
% for item in seq:
${item} ## the rest of the stuff
% endfor"""
)
assert [
int(x.strip()) for x in tmpl.render(seq=list(range(5))).split()
] == list(range(5))
def test_line_syntax_priority(self, env):
# XXX: why is the whitespace there in front of the newline?
env = Environment("{%", "%}", "${", "}", "/*", "*/", "##", "#")
tmpl = env.from_string(
"""\
/* ignore me.
I'm a multiline comment */
## for item in seq:
* ${item} # this is just extra stuff
## endfor"""
)
assert tmpl.render(seq=[1, 2]).strip() == "* 1\n* 2"
env = Environment("{%", "%}", "${", "}", "/*", "*/", "#", "##")
tmpl = env.from_string(
"""\
/* ignore me.
I'm a multiline comment */
# for item in seq:
* ${item} ## this is just extra stuff
## extra stuff i just want to ignore
# endfor"""
)
assert tmpl.render(seq=[1, 2]).strip() == "* 1\n\n* 2"
def test_error_messages(self, env):
def assert_error(code, expected):
with pytest.raises(TemplateSyntaxError, match=expected):
Template(code)
assert_error(
"{% for item in seq %}...{% endif %}",
"Encountered unknown tag 'endif'. Jinja was looking "
"for the following tags: 'endfor' or 'else'. The "
"innermost block that needs to be closed is 'for'.",
)
assert_error(
"{% if foo %}{% for item in seq %}...{% endfor %}{% endfor %}",
"Encountered unknown tag 'endfor'. Jinja was looking for "
"the following tags: 'elif' or 'else' or 'endif'. The "
"innermost block that needs to be closed is 'if'.",
)
assert_error(
"{% if foo %}",
"Unexpected end of template. Jinja was looking for the "
"following tags: 'elif' or 'else' or 'endif'. The "
"innermost block that needs to be closed is 'if'.",
)
assert_error(
"{% for item in seq %}",
"Unexpected end of template. Jinja was looking for the "
"following tags: 'endfor' or 'else'. The innermost block "
"that needs to be closed is 'for'.",
)
assert_error(
"{% block foo-bar-baz %}",
"Block names in Jinja have to be valid Python identifiers "
"and may not contain hyphens, use an underscore instead.",
)
assert_error("{% unknown_tag %}", "Encountered unknown tag 'unknown_tag'.")
class TestSyntax:
def test_call(self, env):
env = Environment()
env.globals["foo"] = lambda a, b, c, e, g: a + b + c + e + g
tmpl = env.from_string("{{ foo('a', c='d', e='f', *['b'], **{'g': 'h'}) }}")
assert tmpl.render() == "abdfh"
def test_slicing(self, env):
tmpl = env.from_string("{{ [1, 2, 3][:] }}|{{ [1, 2, 3][::-1] }}")
assert tmpl.render() == "[1, 2, 3]|[3, 2, 1]"
def test_attr(self, env):
tmpl = env.from_string("{{ foo.bar }}|{{ foo['bar'] }}")
assert tmpl.render(foo={"bar": 42}) == "42|42"
def test_subscript(self, env):
tmpl = env.from_string("{{ foo[0] }}|{{ foo[-1] }}")
assert tmpl.render(foo=[0, 1, 2]) == "0|2"
def test_tuple(self, env):
tmpl = env.from_string("{{ () }}|{{ (1,) }}|{{ (1, 2) }}")
assert tmpl.render() == "()|(1,)|(1, 2)"
def test_math(self, env):
tmpl = env.from_string("{{ (1 + 1 * 2) - 3 / 2 }}|{{ 2**3 }}")
assert tmpl.render() == "1.5|8"
def test_div(self, env):
tmpl = env.from_string("{{ 3 // 2 }}|{{ 3 / 2 }}|{{ 3 % 2 }}")
assert tmpl.render() == "1|1.5|1"
def test_unary(self, env):
tmpl = env.from_string("{{ +3 }}|{{ -3 }}")
assert tmpl.render() == "3|-3"
def test_concat(self, env):
tmpl = env.from_string("{{ [1, 2] ~ 'foo' }}")
assert tmpl.render() == "[1, 2]foo"
@pytest.mark.parametrize(
("a", "op", "b"),
[
(1, ">", 0),
(1, ">=", 1),
(2, "<", 3),
(3, "<=", 4),
(4, "==", 4),
(4, "!=", 5),
],
)
def test_compare(self, env, a, op, b):
t = env.from_string(f"{{{{ {a} {op} {b} }}}}")
assert t.render() == "True"
def test_compare_parens(self, env):
t = env.from_string("{{ i * (j < 5) }}")
assert t.render(i=2, j=3) == "2"
@pytest.mark.parametrize(
("src", "expect"),
[
("{{ 4 < 2 < 3 }}", "False"),
("{{ a < b < c }}", "False"),
("{{ 4 > 2 > 3 }}", "False"),
("{{ a > b > c }}", "False"),
("{{ 4 > 2 < 3 }}", "True"),
("{{ a > b < c }}", "True"),
],
)
def test_compare_compound(self, env, src, expect):
t = env.from_string(src)
assert t.render(a=4, b=2, c=3) == expect
def test_inop(self, env):
tmpl = env.from_string("{{ 1 in [1, 2, 3] }}|{{ 1 not in [1, 2, 3] }}")
assert tmpl.render() == "True|False"
@pytest.mark.parametrize("value", ("[]", "{}", "()"))
def test_collection_literal(self, env, value):
t = env.from_string(f"{{{{ {value} }}}}")
assert t.render() == value
@pytest.mark.parametrize(
("value", "expect"),
(
("1", "1"),
("123", "123"),
("12_34_56", "123456"),
("1.2", "1.2"),
("34.56", "34.56"),
("3_4.5_6", "34.56"),
("1e0", "1.0"),
("10e1", "100.0"),
("2.5e100", "2.5e+100"),
("2.5e+100", "2.5e+100"),
("25.6e-10", "2.56e-09"),
("1_2.3_4e5_6", "1.234e+57"),
("0", "0"),
("0_00", "0"),
("0b1001_1111", "159"),
("0o123", "83"),
("0o1_23", "83"),
("0x123abc", "1194684"),
("0x12_3abc", "1194684"),
),
)
def test_numeric_literal(self, env, value, expect):
t = env.from_string(f"{{{{ {value} }}}}")
assert t.render() == expect
def test_bool(self, env):
tmpl = env.from_string(
"{{ true and false }}|{{ false or true }}|{{ not false }}"
)
assert tmpl.render() == "False|True|True"
def test_grouping(self, env):
tmpl = env.from_string(
"{{ (true and false) or (false and true) and not false }}"
)
assert tmpl.render() == "False"
def test_django_attr(self, env):
tmpl = env.from_string("{{ [1, 2, 3].0 }}|{{ [[1]].0.0 }}")
assert tmpl.render() == "1|1"
def test_conditional_expression(self, env):
tmpl = env.from_string("""{{ 0 if true else 1 }}""")
assert tmpl.render() == "0"
def test_short_conditional_expression(self, env):
tmpl = env.from_string("<{{ 1 if false }}>")
assert tmpl.render() == "<>"
tmpl = env.from_string("<{{ (1 if false).bar }}>")
pytest.raises(UndefinedError, tmpl.render)
def test_filter_priority(self, env):
tmpl = env.from_string('{{ "foo"|upper + "bar"|upper }}')
assert tmpl.render() == "FOOBAR"
def test_function_calls(self, env):
tests = [
(True, "*foo, bar"),
(True, "*foo, *bar"),
(True, "**foo, *bar"),
(True, "**foo, bar"),
(True, "**foo, **bar"),
(True, "**foo, bar=42"),
(False, "foo, bar"),
(False, "foo, bar=42"),
(False, "foo, bar=23, *args"),
(False, "foo, *args, bar=23"),
(False, "a, b=c, *d, **e"),
(False, "*foo, bar=42"),
(False, "*foo, **bar"),
(False, "*foo, bar=42, **baz"),
(False, "foo, *args, bar=23, **baz"),
]
for should_fail, sig in tests:
if should_fail:
with pytest.raises(TemplateSyntaxError):
env.from_string(f"{{{{ foo({sig}) }}}}")
else:
env.from_string(f"foo({sig})")
def test_tuple_expr(self, env):
for tmpl in [
"{{ () }}",
"{{ (1, 2) }}",
"{{ (1, 2,) }}",
"{{ 1, }}",
"{{ 1, 2 }}",
"{% for foo, bar in seq %}...{% endfor %}",
"{% for x in foo, bar %}...{% endfor %}",
"{% for x in foo, %}...{% endfor %}",
]:
assert env.from_string(tmpl)
def test_trailing_comma(self, env):
tmpl = env.from_string("{{ (1, 2,) }}|{{ [1, 2,] }}|{{ {1: 2,} }}")
assert tmpl.render().lower() == "(1, 2)|[1, 2]|{1: 2}"
def test_block_end_name(self, env):
env.from_string("{% block foo %}...{% endblock foo %}")
pytest.raises(
TemplateSyntaxError, env.from_string, "{% block x %}{% endblock y %}"
)
def test_constant_casing(self, env):
for const in True, False, None:
const = str(const)
tmpl = env.from_string(
f"{{{{ {const} }}}}|{{{{ {const.lower()} }}}}|{{{{ {const.upper()} }}}}"
)
assert tmpl.render() == f"{const}|{const}|"
def test_test_chaining(self, env):
pytest.raises(
TemplateSyntaxError, env.from_string, "{{ foo is string is sequence }}"
)
assert env.from_string("{{ 42 is string or 42 is number }}").render() == "True"
def test_string_concatenation(self, env):
tmpl = env.from_string('{{ "foo" "bar" "baz" }}')
assert tmpl.render() == "foobarbaz"
def test_notin(self, env):
bar = range(100)
tmpl = env.from_string("""{{ not 42 in bar }}""")
assert tmpl.render(bar=bar) == "False"
def test_operator_precedence(self, env):
tmpl = env.from_string("""{{ 2 * 3 + 4 % 2 + 1 - 2 }}""")
assert tmpl.render() == "5"
def test_implicit_subscribed_tuple(self, env):
class Foo:
def __getitem__(self, x):
return x
t = env.from_string("{{ foo[1, 2] }}")
assert t.render(foo=Foo()) == "(1, 2)"
def test_raw2(self, env):
tmpl = env.from_string("{% raw %}{{ FOO }} and {% BAR %}{% endraw %}")
assert tmpl.render() == "{{ FOO }} and {% BAR %}"
def test_const(self, env):
tmpl = env.from_string(
"{{ true }}|{{ false }}|{{ none }}|"
"{{ none is defined }}|{{ missing is defined }}"
)
assert tmpl.render() == "True|False|None|True|False"
def test_neg_filter_priority(self, env):
node = env.parse("{{ -1|foo }}")
assert isinstance(node.body[0].nodes[0], nodes.Filter)
assert isinstance(node.body[0].nodes[0].node, nodes.Neg)
def test_const_assign(self, env):
constass1 = """{% set true = 42 %}"""
constass2 = """{% for none in seq %}{% endfor %}"""
for tmpl in constass1, constass2:
pytest.raises(TemplateSyntaxError, env.from_string, tmpl)
def test_localset(self, env):
tmpl = env.from_string(
"""{% set foo = 0 %}\
{% for item in [1, 2] %}{% set foo = 1 %}{% endfor %}\
{{ foo }}"""
)
assert tmpl.render() == "0"
def test_parse_unary(self, env):
tmpl = env.from_string('{{ -foo["bar"] }}')
assert tmpl.render(foo={"bar": 42}) == "-42"
tmpl = env.from_string('{{ -foo["bar"]|abs }}')
assert tmpl.render(foo={"bar": 42}) == "42"
class TestLstripBlocks:
def test_lstrip(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(""" {% if True %}\n {% endif %}""")
assert tmpl.render() == "\n"
def test_lstrip_trim(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(""" {% if True %}\n {% endif %}""")
assert tmpl.render() == ""
def test_no_lstrip(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(""" {%+ if True %}\n {%+ endif %}""")
assert tmpl.render() == " \n "
def test_lstrip_blocks_false_with_no_lstrip(self, env):
# Test that + is a NOP (but does not cause an error) if lstrip_blocks=False
env = Environment(lstrip_blocks=False, trim_blocks=False)
tmpl = env.from_string(""" {% if True %}\n {% endif %}""")
assert tmpl.render() == " \n "
tmpl = env.from_string(""" {%+ if True %}\n {%+ endif %}""")
assert tmpl.render() == " \n "
def test_lstrip_endline(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(""" hello{% if True %}\n goodbye{% endif %}""")
assert tmpl.render() == " hello\n goodbye"
def test_lstrip_inline(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(""" {% if True %}hello {% endif %}""")
assert tmpl.render() == "hello "
def test_lstrip_nested(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
""" {% if True %}a {% if True %}b {% endif %}c {% endif %}"""
)
assert tmpl.render() == "a b c "
def test_lstrip_left_chars(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
""" abc {% if True %}
hello{% endif %}"""
)
assert tmpl.render() == " abc \n hello"
    def test_lstrip_embedded_strings(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(""" {% set x = " {% str %} " %}{{ x }}""")
assert tmpl.render() == " {% str %} "
def test_lstrip_preserve_leading_newlines(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string("""\n\n\n{% set hello = 1 %}""")
assert tmpl.render() == "\n\n\n"
def test_lstrip_comment(self, env):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
""" {# if True #}
hello
{#endif#}"""
)
assert tmpl.render() == "\nhello\n"
def test_lstrip_angle_bracket_simple(self, env):
env = Environment(
"<%",
"%>",
"${",
"}",
"<%#",
"%>",
"%",
"##",
lstrip_blocks=True,
trim_blocks=True,
)
tmpl = env.from_string(""" <% if True %>hello <% endif %>""")
assert tmpl.render() == "hello "
def test_lstrip_angle_bracket_comment(self, env):
env = Environment(
"<%",
"%>",
"${",
"}",
"<%#",
"%>",
"%",
"##",
lstrip_blocks=True,
trim_blocks=True,
)
tmpl = env.from_string(""" <%# if True %>hello <%# endif %>""")
assert tmpl.render() == "hello "
def test_lstrip_angle_bracket(self, env):
env = Environment(
"<%",
"%>",
"${",
"}",
"<%#",
"%>",
"%",
"##",
lstrip_blocks=True,
trim_blocks=True,
)
tmpl = env.from_string(
"""\
<%# regular comment %>
<% for item in seq %>
${item} ## the rest of the stuff
<% endfor %>"""
)
assert tmpl.render(seq=range(5)) == "".join(f"{x}\n" for x in range(5))
def test_lstrip_angle_bracket_compact(self, env):
env = Environment(
"<%",
"%>",
"${",
"}",
"<%#",
"%>",
"%",
"##",
lstrip_blocks=True,
trim_blocks=True,
)
tmpl = env.from_string(
"""\
<%#regular comment%>
<%for item in seq%>
${item} ## the rest of the stuff
<%endfor%>"""
)
assert tmpl.render(seq=range(5)) == "".join(f"{x}\n" for x in range(5))
def test_lstrip_blocks_outside_with_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
" {% if kvs %}(\n"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor %}\n"
" ){% endif %}"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == "(\na=1 b=2 \n )"
def test_lstrip_trim_blocks_outside_with_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(
" {% if kvs %}(\n"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor %}\n"
" ){% endif %}"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == "(\na=1 b=2 )"
def test_lstrip_blocks_inside_with_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
" ({% if kvs %}\n"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor %}\n"
" {% endif %})"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == " (\na=1 b=2 \n)"
def test_lstrip_trim_blocks_inside_with_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(
" ({% if kvs %}\n"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor %}\n"
" {% endif %})"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == " (a=1 b=2 )"
def test_lstrip_blocks_without_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
" {% if kvs %}"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor %}"
" {% endif %}"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == " a=1 b=2 "
def test_lstrip_trim_blocks_without_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(
" {% if kvs %}"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor %}"
" {% endif %}"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == " a=1 b=2 "
def test_lstrip_blocks_consume_after_without_new_line(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(
" {% if kvs -%}"
" {% for k, v in kvs %}{{ k }}={{ v }} {% endfor -%}"
" {% endif -%}"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == "a=1 b=2 "
def test_lstrip_trim_blocks_consume_before_without_new_line(self):
env = Environment(lstrip_blocks=False, trim_blocks=False)
tmpl = env.from_string(
" {%- if kvs %}"
" {%- for k, v in kvs %}{{ k }}={{ v }} {% endfor -%}"
" {%- endif %}"
)
out = tmpl.render(kvs=[("a", 1), ("b", 2)])
assert out == "a=1 b=2 "
def test_lstrip_trim_blocks_comment(self):
env = Environment(lstrip_blocks=True, trim_blocks=True)
        tmpl = env.from_string(" {# 1 space #}\n  {# 2 spaces #}    {# 4 spaces #}")
out = tmpl.render()
assert out == " " * 4
def test_lstrip_trim_blocks_raw(self):
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string("{{x}}\n{%- raw %} {% endraw -%}\n{{ y }}")
out = tmpl.render(x=1, y=2)
assert out == "1 2"
def test_php_syntax_with_manual(self, env):
env = Environment(
"<?", "?>", "<?=", "?>", "<!--", "-->", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(
"""\
<!-- I'm a comment, I'm not interesting -->
<? for item in seq -?>
<?= item ?>
<?- endfor ?>"""
)
assert tmpl.render(seq=range(5)) == "01234"
def test_php_syntax(self, env):
env = Environment(
"<?", "?>", "<?=", "?>", "<!--", "-->", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(
"""\
<!-- I'm a comment, I'm not interesting -->
<? for item in seq ?>
<?= item ?>
<? endfor ?>"""
)
assert tmpl.render(seq=range(5)) == "".join(f" {x}\n" for x in range(5))
def test_php_syntax_compact(self, env):
env = Environment(
"<?", "?>", "<?=", "?>", "<!--", "-->", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(
"""\
<!-- I'm a comment, I'm not interesting -->
<?for item in seq?>
<?=item?>
<?endfor?>"""
)
assert tmpl.render(seq=range(5)) == "".join(f" {x}\n" for x in range(5))
def test_erb_syntax(self, env):
env = Environment(
"<%", "%>", "<%=", "%>", "<%#", "%>", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(
"""\
<%# I'm a comment, I'm not interesting %>
<% for item in seq %>
<%= item %>
<% endfor %>
"""
)
assert tmpl.render(seq=range(5)) == "".join(f" {x}\n" for x in range(5))
def test_erb_syntax_with_manual(self, env):
env = Environment(
"<%", "%>", "<%=", "%>", "<%#", "%>", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(
"""\
<%# I'm a comment, I'm not interesting %>
<% for item in seq -%>
<%= item %>
<%- endfor %>"""
)
assert tmpl.render(seq=range(5)) == "01234"
def test_erb_syntax_no_lstrip(self, env):
env = Environment(
"<%", "%>", "<%=", "%>", "<%#", "%>", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(
"""\
<%# I'm a comment, I'm not interesting %>
<%+ for item in seq -%>
<%= item %>
<%- endfor %>"""
)
assert tmpl.render(seq=range(5)) == " 01234"
def test_comment_syntax(self, env):
env = Environment(
"<!--",
"-->",
"${",
"}",
"<!--#",
"-->",
lstrip_blocks=True,
trim_blocks=True,
)
tmpl = env.from_string(
"""\
<!--# I'm a comment, I'm not interesting -->\
<!-- for item in seq --->
${item}
<!--- endfor -->"""
)
assert tmpl.render(seq=range(5)) == "01234"
class TestTrimBlocks:
def test_trim(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=False)
tmpl = env.from_string(" {% if True %}\n {% endif %}")
assert tmpl.render() == " "
def test_no_trim(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=False)
tmpl = env.from_string(" {% if True +%}\n {% endif %}")
assert tmpl.render() == " \n "
def test_no_trim_outer(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=False)
tmpl = env.from_string("{% if True %}X{% endif +%}\nmore things")
assert tmpl.render() == "X\nmore things"
def test_lstrip_no_trim(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(" {% if True +%}\n {% endif %}")
assert tmpl.render() == "\n"
def test_trim_blocks_false_with_no_trim(self, env):
# Test that + is a NOP (but does not cause an error) if trim_blocks=False
env = Environment(trim_blocks=False, lstrip_blocks=False)
tmpl = env.from_string(" {% if True %}\n {% endif %}")
assert tmpl.render() == " \n "
tmpl = env.from_string(" {% if True +%}\n {% endif %}")
assert tmpl.render() == " \n "
tmpl = env.from_string(" {# comment #}\n ")
assert tmpl.render() == " \n "
tmpl = env.from_string(" {# comment +#}\n ")
assert tmpl.render() == " \n "
tmpl = env.from_string(" {% raw %}{% endraw %}\n ")
assert tmpl.render() == " \n "
tmpl = env.from_string(" {% raw %}{% endraw +%}\n ")
assert tmpl.render() == " \n "
def test_trim_nested(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(
" {% if True %}\na {% if True %}\nb {% endif %}\nc {% endif %}"
)
assert tmpl.render() == "a b c "
def test_no_trim_nested(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(
" {% if True +%}\na {% if True +%}\nb {% endif +%}\nc {% endif %}"
)
assert tmpl.render() == "\na \nb \nc "
def test_comment_trim(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(""" {# comment #}\n\n """)
assert tmpl.render() == "\n "
def test_comment_no_trim(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(""" {# comment +#}\n\n """)
assert tmpl.render() == "\n\n "
def test_multiple_comment_trim_lstrip(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(
" {# comment #}\n\n{# comment2 #}\n \n{# comment3 #}\n\n "
)
assert tmpl.render() == "\n \n\n "
def test_multiple_comment_no_trim_lstrip(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string(
" {# comment +#}\n\n{# comment2 +#}\n \n{# comment3 +#}\n\n "
)
assert tmpl.render() == "\n\n\n \n\n\n "
def test_raw_trim_lstrip(self, env):
env = Environment(trim_blocks=True, lstrip_blocks=True)
tmpl = env.from_string("{{x}}{% raw %}\n\n {% endraw %}\n\n{{ y }}")
assert tmpl.render(x=1, y=2) == "1\n\n\n2"
def test_raw_no_trim_lstrip(self, env):
env = Environment(trim_blocks=False, lstrip_blocks=True)
tmpl = env.from_string("{{x}}{% raw %}\n\n {% endraw +%}\n\n{{ y }}")
assert tmpl.render(x=1, y=2) == "1\n\n\n\n2"
# raw blocks do not process inner text, so start tag cannot ignore trim
with pytest.raises(TemplateSyntaxError):
tmpl = env.from_string("{{x}}{% raw +%}\n\n {% endraw +%}\n\n{{ y }}")
def test_no_trim_angle_bracket(self, env):
env = Environment(
"<%", "%>", "${", "}", "<%#", "%>", lstrip_blocks=True, trim_blocks=True
)
tmpl = env.from_string(" <% if True +%>\n\n <% endif %>")
assert tmpl.render() == "\n\n"
tmpl = env.from_string(" <%# comment +%>\n\n ")
assert tmpl.render() == "\n\n "
def test_no_trim_php_syntax(self, env):
env = Environment(
"<?",
"?>",
"<?=",
"?>",
"<!--",
"-->",
lstrip_blocks=False,
trim_blocks=True,
)
tmpl = env.from_string(" <? if True +?>\n\n <? endif ?>")
assert tmpl.render() == " \n\n "
tmpl = env.from_string(" <!-- comment +-->\n\n ")
assert tmpl.render() == " \n\n "
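    # (Editorial summary, not part of the original suite) The whitespace
    # control markers exercised above are: "-" always strips the adjacent
    # whitespace, "+" keeps it even when trim_blocks/lstrip_blocks are
    # enabled, and plain delimiters follow whatever the Environment flags say.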
| 35,464 | 33.398642 | 88 | py |
jinja | jinja-main/tests/test_regression.py | import pytest
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import PrefixLoader
from jinja2 import Template
from jinja2 import TemplateAssertionError
from jinja2 import TemplateNotFound
from jinja2 import TemplateSyntaxError
from jinja2.utils import pass_context
class TestCorner:
def test_assigned_scoping(self, env):
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{{- item -}}
"""
)
assert t.render(item=42) == "[1][2][3][4]42"
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{%- set item = 42 %}
{{- item -}}
"""
)
assert t.render() == "[1][2][3][4]42"
t = env.from_string(
"""
{%- set item = 42 %}
{%- for item in (1, 2, 3, 4) -%}
[{{ item }}]
{%- endfor %}
{{- item -}}
"""
)
assert t.render() == "[1][2][3][4]42"
def test_closure_scoping(self, env):
t = env.from_string(
"""
{%- set wrapper = "<FOO>" %}
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{{- wrapper -}}
"""
)
assert t.render() == "[1][2][3][4]<FOO>"
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{%- set wrapper = "<FOO>" %}
{{- wrapper -}}
"""
)
assert t.render() == "[1][2][3][4]<FOO>"
t = env.from_string(
"""
{%- for item in (1, 2, 3, 4) %}
{%- macro wrapper() %}[{{ item }}]{% endmacro %}
{{- wrapper() }}
{%- endfor %}
{{- wrapper -}}
"""
)
assert t.render(wrapper=23) == "[1][2][3][4]23"
class TestBug:
def test_keyword_folding(self, env):
env = Environment()
env.filters["testing"] = lambda value, some: value + some
assert (
env.from_string("{{ 'test'|testing(some='stuff') }}").render()
== "teststuff"
)
def test_extends_output_bugs(self, env):
env = Environment(
loader=DictLoader({"parent.html": "(({% block title %}{% endblock %}))"})
)
t = env.from_string(
'{% if expr %}{% extends "parent.html" %}{% endif %}'
"[[{% block title %}title{% endblock %}]]"
"{% for item in [1, 2, 3] %}({{ item }}){% endfor %}"
)
assert t.render(expr=False) == "[[title]](1)(2)(3)"
assert t.render(expr=True) == "((title))"
def test_urlize_filter_escaping(self, env):
tmpl = env.from_string('{{ "http://www.example.org/<foo"|urlize }}')
assert (
tmpl.render() == '<a href="http://www.example.org/<foo" rel="noopener">'
"http://www.example.org/<foo</a>"
)
def test_urlize_filter_closing_punctuation(self, env):
tmpl = env.from_string(
'{{ "(see http://www.example.org/?page=subj_<desc.h>)"|urlize }}'
)
assert tmpl.render() == (
'(see <a href="http://www.example.org/?page=subj_<desc.h>" '
'rel="noopener">http://www.example.org/?page=subj_<desc.h></a>)'
)
def test_loop_call_loop(self, env):
tmpl = env.from_string(
"""
{% macro test() %}
{{ caller() }}
{% endmacro %}
{% for num1 in range(5) %}
{% call test() %}
{% for num2 in range(10) %}
{{ loop.index }}
{% endfor %}
{% endcall %}
{% endfor %}
"""
)
assert tmpl.render().split() == [str(x) for x in range(1, 11)] * 5
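        # (Editorial note) The {% call %} block above compiles to an anonymous
        # macro that ``test()`` invokes via ``caller()``; ``loop.index`` inside
        # it therefore refers to the inner ``num2`` loop where it appears.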
def test_weird_inline_comment(self, env):
env = Environment(line_statement_prefix="%")
pytest.raises(
TemplateSyntaxError,
env.from_string,
"% for item in seq {# missing #}\n...% endfor",
)
def test_old_macro_loop_scoping_bug(self, env):
tmpl = env.from_string(
"{% for i in (1, 2) %}{{ i }}{% endfor %}"
"{% macro i() %}3{% endmacro %}{{ i() }}"
)
assert tmpl.render() == "123"
def test_partial_conditional_assignments(self, env):
tmpl = env.from_string("{% if b %}{% set a = 42 %}{% endif %}{{ a }}")
assert tmpl.render(a=23) == "23"
assert tmpl.render(b=True) == "42"
def test_stacked_locals_scoping_bug(self, env):
env = Environment(line_statement_prefix="#")
t = env.from_string(
"""\
# for j in [1, 2]:
# set x = 1
# for i in [1, 2]:
# print x
# if i % 2 == 0:
# set x = x + 1
# endif
# endfor
# endfor
# if a
# print 'A'
# elif b
# print 'B'
# elif c == d
# print 'C'
# else
# print 'D'
# endif
"""
)
assert t.render(a=0, b=False, c=42, d=42.0) == "1111C"
def test_stacked_locals_scoping_bug_twoframe(self, env):
t = Template(
"""
{% set x = 1 %}
{% for item in foo %}
{% if item == 1 %}
{% set x = 2 %}
{% endif %}
{% endfor %}
{{ x }}
"""
)
rv = t.render(foo=[1]).strip()
assert rv == "1"
def test_call_with_args(self, env):
t = Template(
"""{% macro dump_users(users) -%}
<ul>
{%- for user in users -%}
<li><p>{{ user.username|e }}</p>{{ caller(user) }}</li>
{%- endfor -%}
</ul>
{%- endmacro -%}
{% call(user) dump_users(list_of_user) -%}
<dl>
<dl>Realname</dl>
<dd>{{ user.realname|e }}</dd>
<dl>Description</dl>
<dd>{{ user.description }}</dd>
</dl>
{% endcall %}"""
)
assert [
x.strip()
for x in t.render(
list_of_user=[
{
"username": "apo",
"realname": "something else",
"description": "test",
}
]
).splitlines()
] == [
"<ul><li><p>apo</p><dl>",
"<dl>Realname</dl>",
"<dd>something else</dd>",
"<dl>Description</dl>",
"<dd>test</dd>",
"</dl>",
"</li></ul>",
]
def test_empty_if_condition_fails(self, env):
pytest.raises(TemplateSyntaxError, Template, "{% if %}....{% endif %}")
pytest.raises(
TemplateSyntaxError, Template, "{% if foo %}...{% elif %}...{% endif %}"
)
pytest.raises(TemplateSyntaxError, Template, "{% for x in %}..{% endfor %}")
def test_recursive_loop_compile(self, env):
Template(
"""
{% for p in foo recursive%}
{{p.bar}}
{% for f in p.fields recursive%}
{{f.baz}}
{{p.bar}}
{% if f.rec %}
{{ loop(f.sub) }}
{% endif %}
{% endfor %}
{% endfor %}
"""
)
Template(
"""
{% for p in foo%}
{{p.bar}}
{% for f in p.fields recursive%}
{{f.baz}}
{{p.bar}}
{% if f.rec %}
{{ loop(f.sub) }}
{% endif %}
{% endfor %}
{% endfor %}
"""
)
def test_else_loop_bug(self, env):
t = Template(
"""
{% for x in y %}
{{ loop.index0 }}
{% else %}
{% for i in range(3) %}{{ i }}{% endfor %}
{% endfor %}
"""
)
assert t.render(y=[]).strip() == "012"
def test_correct_prefix_loader_name(self, env):
env = Environment(loader=PrefixLoader({"foo": DictLoader({})}))
with pytest.raises(TemplateNotFound) as e:
env.get_template("foo/bar.html")
assert e.value.name == "foo/bar.html"
def test_pass_context_callable_class(self, env):
class CallableClass:
@pass_context
def __call__(self, ctx):
return ctx.resolve("hello")
tpl = Template("""{{ callableclass() }}""")
output = tpl.render(callableclass=CallableClass(), hello="TEST")
expected = "TEST"
assert output == expected
def test_block_set_with_extends(self):
env = Environment(
loader=DictLoader({"main": "{% block body %}[{{ x }}]{% endblock %}"})
)
t = env.from_string('{% extends "main" %}{% set x %}42{% endset %}')
assert t.render() == "[42]"
def test_nested_for_else(self, env):
tmpl = env.from_string(
"{% for x in y %}{{ loop.index0 }}{% else %}"
"{% for i in range(3) %}{{ i }}{% endfor %}"
"{% endfor %}"
)
assert tmpl.render() == "012"
def test_macro_var_bug(self, env):
tmpl = env.from_string(
"""
{% set i = 1 %}
{% macro test() %}
{% for i in range(0, 10) %}{{ i }}{% endfor %}
{% endmacro %}{{ test() }}
"""
)
assert tmpl.render().strip() == "0123456789"
def test_macro_var_bug_advanced(self, env):
tmpl = env.from_string(
"""
{% macro outer() %}
{% set i = 1 %}
{% macro test() %}
{% for i in range(0, 10) %}{{ i }}{% endfor %}
{% endmacro %}{{ test() }}
{% endmacro %}{{ outer() }}
"""
)
assert tmpl.render().strip() == "0123456789"
def test_callable_defaults(self):
env = Environment()
env.globals["get_int"] = lambda: 42
t = env.from_string(
"""
{% macro test(a, b, c=get_int()) -%}
{{ a + b + c }}
{%- endmacro %}
{{ test(1, 2) }}|{{ test(1, 2, 3) }}
"""
)
assert t.render().strip() == "45|6"
def test_macro_escaping(self):
env = Environment(autoescape=lambda x: False)
template = "{% macro m() %}<html>{% endmacro %}"
template += "{% autoescape true %}{{ m() }}{% endautoescape %}"
assert env.from_string(template).render()
def test_macro_scoping(self, env):
tmpl = env.from_string(
"""
{% set n=[1,2,3,4,5] %}
{% for n in [[1,2,3], [3,4,5], [5,6,7]] %}
{% macro x(l) %}
{{ l.pop() }}
{% if l %}{{ x(l) }}{% endif %}
{% endmacro %}
{{ x(n) }}
{% endfor %}
"""
)
assert list(map(int, tmpl.render().split())) == [3, 2, 1, 5, 4, 3, 7, 6, 5]
def test_scopes_and_blocks(self):
env = Environment(
loader=DictLoader(
{
"a.html": """
{%- set foo = 'bar' -%}
{% include 'x.html' -%}
""",
"b.html": """
{%- set foo = 'bar' -%}
{% block test %}{% include 'x.html' %}{% endblock -%}
""",
"c.html": """
{%- set foo = 'bar' -%}
{% block test %}{% set foo = foo
%}{% include 'x.html' %}{% endblock -%}
""",
"x.html": """{{ foo }}|{{ test }}""",
}
)
)
a = env.get_template("a.html")
b = env.get_template("b.html")
c = env.get_template("c.html")
assert a.render(test="x").strip() == "bar|x"
assert b.render(test="x").strip() == "bar|x"
assert c.render(test="x").strip() == "bar|x"
def test_scopes_and_include(self):
env = Environment(
loader=DictLoader(
{
"include.html": "{{ var }}",
"base.html": '{% include "include.html" %}',
"child.html": '{% extends "base.html" %}{% set var = 42 %}',
}
)
)
t = env.get_template("child.html")
assert t.render() == "42"
def test_caller_scoping(self, env):
t = env.from_string(
"""
{% macro detail(icon, value) -%}
{% if value -%}
<p><span class="fa fa-fw fa-{{ icon }}"></span>
{%- if caller is undefined -%}
{{ value }}
{%- else -%}
{{ caller(value, *varargs) }}
{%- endif -%}</p>
{%- endif %}
{%- endmacro %}
{% macro link_detail(icon, value, href) -%}
{% call(value, href) detail(icon, value, href) -%}
<a href="{{ href }}">{{ value }}</a>
{%- endcall %}
{%- endmacro %}
"""
)
assert t.module.link_detail("circle", "Index", "/") == (
'<p><span class="fa fa-fw fa-circle"></span><a href="/">Index</a></p>'
)
def test_variable_reuse(self, env):
t = env.from_string("{% for x in x.y %}{{ x }}{% endfor %}")
assert t.render(x={"y": [0, 1, 2]}) == "012"
t = env.from_string("{% for x in x.y %}{{ loop.index0 }}|{{ x }}{% endfor %}")
assert t.render(x={"y": [0, 1, 2]}) == "0|01|12|2"
t = env.from_string("{% for x in x.y recursive %}{{ x }}{% endfor %}")
assert t.render(x={"y": [0, 1, 2]}) == "012"
def test_double_caller(self, env):
t = env.from_string(
"{% macro x(caller=none) %}[{% if caller %}"
"{{ caller() }}{% endif %}]{% endmacro %}"
"{{ x() }}{% call x() %}aha!{% endcall %}"
)
assert t.render() == "[][aha!]"
def test_double_caller_no_default(self, env):
with pytest.raises(TemplateAssertionError) as exc_info:
env.from_string(
"{% macro x(caller) %}[{% if caller %}"
"{{ caller() }}{% endif %}]{% endmacro %}"
)
assert exc_info.match(
r'"caller" argument must be omitted or ' r"be given a default"
)
t = env.from_string(
"{% macro x(caller=none) %}[{% if caller %}"
"{{ caller() }}{% endif %}]{% endmacro %}"
)
with pytest.raises(TypeError) as exc_info:
t.module.x(None, caller=lambda: 42)
assert exc_info.match(
r"\'x\' was invoked with two values for the " r"special caller argument"
)
def test_macro_blocks(self, env):
t = env.from_string(
"{% macro x() %}{% block foo %}x{% endblock %}{% endmacro %}{{ x() }}"
)
assert t.render() == "x"
def test_scoped_block(self, env):
t = env.from_string(
"{% set x = 1 %}{% with x = 2 %}{% block y scoped %}"
"{{ x }}{% endblock %}{% endwith %}"
)
assert t.render() == "2"
def test_recursive_loop_filter(self, env):
t = env.from_string(
"""
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{%- for page in [site.root] if page.url != this recursive %}
<url><loc>{{ page.url }}</loc></url>
{{- loop(page.children) }}
{%- endfor %}
</urlset>
"""
)
sm = t.render(
this="/foo",
site={"root": {"url": "/", "children": [{"url": "/foo"}, {"url": "/bar"}]}},
)
lines = [x.strip() for x in sm.splitlines() if x.strip()]
assert lines == [
'<?xml version="1.0" encoding="UTF-8"?>',
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">',
"<url><loc>/</loc></url>",
"<url><loc>/bar</loc></url>",
"</urlset>",
]
def test_empty_if(self, env):
t = env.from_string("{% if foo %}{% else %}42{% endif %}")
assert t.render(foo=False) == "42"
def test_subproperty_if(self, env):
t = env.from_string(
"{% if object1.subproperty1 is eq object2.subproperty2 %}42{% endif %}"
)
assert (
t.render(
object1={"subproperty1": "value"}, object2={"subproperty2": "value"}
)
== "42"
)
def test_set_and_include(self):
env = Environment(
loader=DictLoader(
{
"inc": "bar",
"main": '{% set foo = "foo" %}{{ foo }}{% include "inc" %}',
}
)
)
assert env.get_template("main").render() == "foobar"
def test_loop_include(self):
env = Environment(
loader=DictLoader(
{
"inc": "{{ i }}",
"main": '{% for i in [1, 2, 3] %}{% include "inc" %}{% endfor %}',
}
)
)
assert env.get_template("main").render() == "123"
def test_grouper_repr(self):
from jinja2.filters import _GroupTuple
t = _GroupTuple("foo", [1, 2])
assert t.grouper == "foo"
assert t.list == [1, 2]
assert repr(t) == "('foo', [1, 2])"
assert str(t) == "('foo', [1, 2])"
def test_custom_context(self, env):
from jinja2.runtime import Context
class MyContext(Context):
pass
class MyEnvironment(Environment):
context_class = MyContext
loader = DictLoader({"base": "{{ foobar }}", "test": '{% extends "base" %}'})
env = MyEnvironment(loader=loader)
assert env.get_template("test").render(foobar="test") == "test"
def test_recursive_loop_bug(self, env):
tmpl = env.from_string(
"{%- for value in values recursive %}1{% else %}0{% endfor -%}"
)
assert tmpl.render(values=[]) == "0"
def test_markup_and_chainable_undefined(self):
from markupsafe import Markup
from jinja2.runtime import ChainableUndefined
assert str(Markup(ChainableUndefined())) == ""
def test_scoped_block_loop_vars(self, env):
tmpl = env.from_string(
"""\
Start
{% for i in ["foo", "bar"] -%}
{% block body scoped -%}
{{ loop.index }}) {{ i }}{% if loop.last %} last{% endif -%}
{%- endblock %}
{% endfor -%}
End"""
)
assert tmpl.render() == "Start\n1) foo\n2) bar last\nEnd"
def test_pass_context_loop_vars(self, env):
@pass_context
def test(ctx):
return f"{ctx['i']}{ctx['j']}"
tmpl = env.from_string(
"""\
{% set i = 42 %}
{%- for idx in range(2) -%}
{{ i }}{{ j }}
{% set i = idx -%}
{%- set j = loop.index -%}
{{ test() }}
{{ i }}{{ j }}
{% endfor -%}
{{ i }}{{ j }}"""
)
tmpl.globals["test"] = test
assert tmpl.render() == "42\n01\n01\n42\n12\n12\n42"
def test_pass_context_scoped_loop_vars(self, env):
@pass_context
def test(ctx):
return f"{ctx['i']}"
tmpl = env.from_string(
"""\
{% set i = 42 %}
{%- for idx in range(2) -%}
{{ i }}
{%- set i = loop.index0 -%}
{% block body scoped %}
{{ test() }}
{% endblock -%}
{% endfor -%}
{{ i }}"""
)
tmpl.globals["test"] = test
assert tmpl.render() == "42\n0\n42\n1\n42"
def test_pass_context_in_blocks(self, env):
@pass_context
def test(ctx):
return f"{ctx['i']}"
tmpl = env.from_string(
"""\
{%- set i = 42 -%}
{{ i }}
{% block body -%}
{% set i = 24 -%}
{{ test() }}
{% endblock -%}
{{ i }}"""
)
tmpl.globals["test"] = test
assert tmpl.render() == "42\n24\n42"
def test_pass_context_block_and_loop(self, env):
@pass_context
def test(ctx):
return f"{ctx['i']}"
tmpl = env.from_string(
"""\
{%- set i = 42 -%}
{% for idx in range(2) -%}
{{ test() }}
{%- set i = idx -%}
{% block body scoped %}
{{ test() }}
{% set i = 24 -%}
{{ test() }}
{% endblock -%}
{{ test() }}
{% endfor -%}
{{ test() }}"""
)
tmpl.globals["test"] = test
# values set within a block or loop should not
# show up outside of it
assert tmpl.render() == "42\n0\n24\n0\n42\n1\n24\n1\n42"
@pytest.mark.parametrize("op", ["extends", "include"])
def test_cached_extends(self, op):
env = Environment(
loader=DictLoader(
{"base": "{{ x }} {{ y }}", "main": f"{{% {op} 'base' %}}"}
)
)
env.globals["x"] = "x"
env.globals["y"] = "y"
# template globals overlay env globals
tmpl = env.get_template("main", globals={"x": "bar"})
assert tmpl.render() == "bar y"
# base was loaded indirectly, it just has env globals
tmpl = env.get_template("base")
assert tmpl.render() == "x y"
# set template globals for base, no longer uses env globals
tmpl = env.get_template("base", globals={"x": 42})
assert tmpl.render() == "42 y"
# templates are cached, they keep template globals set earlier
tmpl = env.get_template("main")
assert tmpl.render() == "bar y"
tmpl = env.get_template("base")
assert tmpl.render() == "42 y"
def test_nested_loop_scoping(self, env):
tmpl = env.from_string(
"{% set output %}{% for x in [1,2,3] %}hello{% endfor %}"
"{% endset %}{{ output }}"
)
assert tmpl.render() == "hellohellohello"
@pytest.mark.parametrize("unicode_char", ["\N{FORM FEED}", "\x85"])
def test_unicode_whitespace(env, unicode_char):
content = "Lorem ipsum\n" + unicode_char + "\nMore text"
tmpl = env.from_string(content)
assert tmpl.render() == content
| 22,249 | 28.865772 | 88 | py |
jinja | jinja-main/tests/test_api.py | import shutil
import tempfile
from pathlib import Path
import pytest
from jinja2 import ChainableUndefined
from jinja2 import DebugUndefined
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import is_undefined
from jinja2 import make_logging_undefined
from jinja2 import meta
from jinja2 import StrictUndefined
from jinja2 import Template
from jinja2 import TemplatesNotFound
from jinja2 import Undefined
from jinja2 import UndefinedError
from jinja2.compiler import CodeGenerator
from jinja2.runtime import Context
from jinja2.utils import Cycler
from jinja2.utils import pass_context
from jinja2.utils import pass_environment
from jinja2.utils import pass_eval_context
class TestExtendedAPI:
def test_item_and_attribute(self, env):
from jinja2.sandbox import SandboxedEnvironment
for env in Environment(), SandboxedEnvironment():
tmpl = env.from_string("{{ foo.items()|list }}")
assert tmpl.render(foo={"items": 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo|attr("items")()|list }}')
assert tmpl.render(foo={"items": 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo["items"] }}')
assert tmpl.render(foo={"items": 42}) == "42"
def test_finalize(self):
e = Environment(finalize=lambda v: "" if v is None else v)
t = e.from_string("{% for item in seq %}|{{ item }}{% endfor %}")
assert t.render(seq=(None, 1, "foo")) == "||1|foo"
def test_finalize_constant_expression(self):
e = Environment(finalize=lambda v: "" if v is None else v)
t = e.from_string("<{{ none }}>")
assert t.render() == "<>"
def test_no_finalize_template_data(self):
e = Environment(finalize=lambda v: type(v).__name__)
t = e.from_string("<{{ value }}>")
# If template data was finalized, it would print "strintstr".
assert t.render(value=123) == "<int>"
def test_context_finalize(self):
@pass_context
def finalize(context, value):
return value * context["scale"]
e = Environment(finalize=finalize)
t = e.from_string("{{ value }}")
assert t.render(value=5, scale=3) == "15"
def test_eval_finalize(self):
@pass_eval_context
def finalize(eval_ctx, value):
return str(eval_ctx.autoescape) + value
e = Environment(finalize=finalize, autoescape=True)
t = e.from_string("{{ value }}")
assert t.render(value="<script>") == "True<script>"
def test_env_autoescape(self):
@pass_environment
def finalize(env, value):
return " ".join(
(env.variable_start_string, repr(value), env.variable_end_string)
)
e = Environment(finalize=finalize)
t = e.from_string("{{ value }}")
assert t.render(value="hello") == "{{ 'hello' }}"
def test_cycler(self, env):
items = 1, 2, 3
c = Cycler(*items)
for item in items + items:
assert c.current == item
assert next(c) == item
next(c)
assert c.current == 2
c.reset()
assert c.current == 1
def test_expressions(self, env):
expr = env.compile_expression("foo")
assert expr() is None
assert expr(foo=42) == 42
expr2 = env.compile_expression("foo", undefined_to_none=False)
assert is_undefined(expr2())
expr = env.compile_expression("42 + foo")
assert expr(foo=42) == 84
def test_template_passthrough(self, env):
t = Template("Content")
assert env.get_template(t) is t
assert env.select_template([t]) is t
assert env.get_or_select_template([t]) is t
assert env.get_or_select_template(t) is t
def test_get_template_undefined(self, env):
"""Passing Undefined to get/select_template raises an
UndefinedError or shows the undefined message in the list.
"""
env.loader = DictLoader({})
t = Undefined(name="no_name_1")
with pytest.raises(UndefinedError):
env.get_template(t)
with pytest.raises(UndefinedError):
env.get_or_select_template(t)
with pytest.raises(UndefinedError):
env.select_template(t)
with pytest.raises(TemplatesNotFound) as exc_info:
env.select_template([t, "no_name_2"])
exc_message = str(exc_info.value)
assert "'no_name_1' is undefined" in exc_message
assert "no_name_2" in exc_message
def test_autoescape_autoselect(self, env):
def select_autoescape(name):
if name is None or "." not in name:
return False
return name.endswith(".html")
env = Environment(
autoescape=select_autoescape,
loader=DictLoader({"test.txt": "{{ foo }}", "test.html": "{{ foo }}"}),
)
t = env.get_template("test.txt")
assert t.render(foo="<foo>") == "<foo>"
t = env.get_template("test.html")
assert t.render(foo="<foo>") == "<foo>"
t = env.from_string("{{ foo }}")
assert t.render(foo="<foo>") == "<foo>"
def test_sandbox_max_range(self, env):
from jinja2.sandbox import SandboxedEnvironment, MAX_RANGE
env = SandboxedEnvironment()
t = env.from_string("{% for item in range(total) %}{{ item }}{% endfor %}")
with pytest.raises(OverflowError):
t.render(total=MAX_RANGE + 1)
class TestMeta:
def test_find_undeclared_variables(self, env):
ast = env.parse("{% set foo = 42 %}{{ bar + foo }}")
x = meta.find_undeclared_variables(ast)
assert x == {"bar"}
ast = env.parse(
"{% set foo = 42 %}{{ bar + foo }}"
"{% macro meh(x) %}{{ x }}{% endmacro %}"
"{% for item in seq %}{{ muh(item) + meh(seq) }}"
"{% endfor %}"
)
x = meta.find_undeclared_variables(ast)
assert x == {"bar", "seq", "muh"}
ast = env.parse("{% for x in range(5) %}{{ x }}{% endfor %}{{ foo }}")
x = meta.find_undeclared_variables(ast)
assert x == {"foo"}
def test_find_refererenced_templates(self, env):
ast = env.parse('{% extends "layout.html" %}{% include helper %}')
i = meta.find_referenced_templates(ast)
assert next(i) == "layout.html"
assert next(i) is None
assert list(i) == []
ast = env.parse(
'{% extends "layout.html" %}'
'{% from "test.html" import a, b as c %}'
'{% import "meh.html" as meh %}'
'{% include "muh.html" %}'
)
i = meta.find_referenced_templates(ast)
assert list(i) == ["layout.html", "test.html", "meh.html", "muh.html"]
def test_find_included_templates(self, env):
ast = env.parse('{% include ["foo.html", "bar.html"] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ["foo.html", "bar.html"]
ast = env.parse('{% include ("foo.html", "bar.html") %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ["foo.html", "bar.html"]
ast = env.parse('{% include ["foo.html", "bar.html", foo] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ["foo.html", "bar.html", None]
ast = env.parse('{% include ("foo.html", "bar.html", foo) %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ["foo.html", "bar.html", None]
class TestStreaming:
def test_basic_streaming(self, env):
t = env.from_string(
"<ul>{% for item in seq %}<li>{{ loop.index }} - {{ item }}</li>"
"{%- endfor %}</ul>"
)
stream = t.stream(seq=list(range(3)))
assert next(stream) == "<ul>"
assert "".join(stream) == "<li>1 - 0</li><li>2 - 1</li><li>3 - 2</li></ul>"
def test_buffered_streaming(self, env):
tmpl = env.from_string(
"<ul>{% for item in seq %}<li>{{ loop.index }} - {{ item }}</li>"
"{%- endfor %}</ul>"
)
stream = tmpl.stream(seq=list(range(3)))
stream.enable_buffering(size=3)
assert next(stream) == "<ul><li>1"
assert next(stream) == " - 0</li>"
def test_streaming_behavior(self, env):
tmpl = env.from_string("")
stream = tmpl.stream()
assert not stream.buffered
stream.enable_buffering(20)
assert stream.buffered
stream.disable_buffering()
assert not stream.buffered
def test_dump_stream(self, env):
tmp = Path(tempfile.mkdtemp())
try:
tmpl = env.from_string("\u2713")
stream = tmpl.stream()
stream.dump(str(tmp / "dump.txt"), "utf-8")
assert (tmp / "dump.txt").read_bytes() == b"\xe2\x9c\x93"
finally:
shutil.rmtree(tmp)
class TestUndefined:
def test_stopiteration_is_undefined(self):
def test():
raise StopIteration()
t = Template("A{{ test() }}B")
assert t.render(test=test) == "AB"
t = Template("A{{ test().missingattribute }}B")
pytest.raises(UndefinedError, t.render, test=test)
def test_undefined_and_special_attributes(self):
with pytest.raises(AttributeError):
Undefined("Foo").__dict__
def test_undefined_attribute_error(self):
# Django's LazyObject turns the __class__ attribute into a
# property that resolves the wrapped function. If that wrapped
# function raises an AttributeError, printing the repr of the
# object in the undefined message would cause a RecursionError.
class Error:
@property # type: ignore
def __class__(self):
raise AttributeError()
u = Undefined(obj=Error(), name="hello")
with pytest.raises(UndefinedError):
getattr(u, "recursion", None)
def test_logging_undefined(self):
_messages = []
class DebugLogger:
def warning(self, msg, *args):
_messages.append("W:" + msg % args)
def error(self, msg, *args):
_messages.append("E:" + msg % args)
logging_undefined = make_logging_undefined(DebugLogger())
env = Environment(undefined=logging_undefined)
assert env.from_string("{{ missing }}").render() == ""
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert env.from_string("{{ foo.missing }}").render(foo=42) == ""
assert env.from_string("{{ not missing }}").render() == "True"
assert _messages == [
"W:Template variable warning: 'missing' is undefined",
"E:Template variable error: 'missing' is undefined",
"W:Template variable warning: 'missing' is undefined",
"W:Template variable warning: 'int object' has no attribute 'missing'",
"W:Template variable warning: 'missing' is undefined",
]
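    def _demo_logging_undefined_stdlib(self):
        # Illustrative sketch, not part of the original suite:
        # make_logging_undefined also accepts a stdlib logger, which is the
        # more common setup than the DebugLogger stub above.
        import logging
        logger = logging.getLogger(__name__)
        env = Environment(undefined=make_logging_undefined(logger))
        assert env.from_string("{{ missing }}").render() == ""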
def test_default_undefined(self):
env = Environment(undefined=Undefined)
assert env.from_string("{{ missing }}").render() == ""
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert env.from_string("{{ foo.missing }}").render(foo=42) == ""
assert env.from_string("{{ not missing }}").render() == "True"
pytest.raises(UndefinedError, env.from_string("{{ missing - 1}}").render)
assert env.from_string("{{ 'foo' in missing }}").render() == "False"
und1 = Undefined(name="x")
und2 = Undefined(name="y")
assert und1 == und2
assert und1 != 42
assert hash(und1) == hash(und2) == hash(Undefined())
with pytest.raises(AttributeError):
getattr(Undefined, "__slots__") # noqa: B009
def test_chainable_undefined(self):
env = Environment(undefined=ChainableUndefined)
# The following tests are copied from test_default_undefined
assert env.from_string("{{ missing }}").render() == ""
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert env.from_string("{{ foo.missing }}").render(foo=42) == ""
assert env.from_string("{{ not missing }}").render() == "True"
pytest.raises(UndefinedError, env.from_string("{{ missing - 1}}").render)
with pytest.raises(AttributeError):
getattr(ChainableUndefined, "__slots__") # noqa: B009
# The following tests ensure subclass functionality works as expected
assert env.from_string('{{ missing.bar["baz"] }}').render() == ""
assert env.from_string('{{ foo.bar["baz"]._undefined_name }}').render() == "foo"
assert (
env.from_string('{{ foo.bar["baz"]._undefined_name }}').render(foo=42)
== "bar"
)
assert (
env.from_string('{{ foo.bar["baz"]._undefined_name }}').render(
foo={"bar": 42}
)
== "baz"
)
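    def _demo_chainable_with_default(self):
        # Illustrative sketch, not part of the original suite: the typical
        # use of ChainableUndefined is a deep config lookup that falls
        # through to a default ("config.site.theme" is a hypothetical path).
        env = Environment(undefined=ChainableUndefined)
        t = env.from_string("{{ config.site.theme | default('plain') }}")
        assert t.render() == "plain"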
def test_debug_undefined(self):
env = Environment(undefined=DebugUndefined)
assert env.from_string("{{ missing }}").render() == "{{ missing }}"
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
assert env.from_string("{{ missing|list }}").render() == "[]"
assert env.from_string("{{ missing is not defined }}").render() == "True"
assert (
env.from_string("{{ foo.missing }}").render(foo=42)
== "{{ no such element: int object['missing'] }}"
)
assert env.from_string("{{ not missing }}").render() == "True"
undefined_hint = "this is testing undefined hint of DebugUndefined"
assert (
str(DebugUndefined(hint=undefined_hint))
== f"{{{{ undefined value printed: {undefined_hint} }}}}"
)
with pytest.raises(AttributeError):
getattr(DebugUndefined, "__slots__") # noqa: B009
def test_strict_undefined(self):
env = Environment(undefined=StrictUndefined)
pytest.raises(UndefinedError, env.from_string("{{ missing }}").render)
pytest.raises(UndefinedError, env.from_string("{{ missing.attribute }}").render)
pytest.raises(UndefinedError, env.from_string("{{ missing|list }}").render)
pytest.raises(UndefinedError, env.from_string("{{ 'foo' in missing }}").render)
assert env.from_string("{{ missing is not defined }}").render() == "True"
pytest.raises(
UndefinedError, env.from_string("{{ foo.missing }}").render, foo=42
)
pytest.raises(UndefinedError, env.from_string("{{ not missing }}").render)
assert (
env.from_string('{{ missing|default("default", true) }}').render()
== "default"
)
with pytest.raises(AttributeError):
getattr(StrictUndefined, "__slots__") # noqa: B009
assert env.from_string('{{ "foo" if false }}').render() == ""
def test_indexing_gives_undefined(self):
t = Template("{{ var[42].foo }}")
pytest.raises(UndefinedError, t.render, var=0)
def test_none_gives_proper_error(self):
with pytest.raises(UndefinedError, match="'None' has no attribute 'split'"):
Environment().getattr(None, "split")()
def test_object_repr(self):
with pytest.raises(
UndefinedError, match="'int object' has no attribute 'upper'"
):
Undefined(obj=42, name="upper")()
class TestLowLevel:
def test_custom_code_generator(self):
class CustomCodeGenerator(CodeGenerator):
def visit_Const(self, node, frame=None):
# This method is pure nonsense, but works fine for testing...
if node.value == "foo":
self.write(repr("bar"))
else:
super().visit_Const(node, frame)
class CustomEnvironment(Environment):
code_generator_class = CustomCodeGenerator
env = CustomEnvironment()
tmpl = env.from_string('{% set foo = "foo" %}{{ foo }}')
assert tmpl.render() == "bar"
def test_custom_context(self):
class CustomContext(Context):
def resolve_or_missing(self, key):
return "resolve-" + key
class CustomEnvironment(Environment):
context_class = CustomContext
env = CustomEnvironment()
tmpl = env.from_string("{{ foo }}")
assert tmpl.render() == "resolve-foo"
| 16,999 | 38.08046 | 88 | py |
jinja | jinja-main/tests/test_pickle.py | import pickle
def test_environment(env):
env = pickle.loads(pickle.dumps(env))
assert env.from_string("x={{ x }}").render(x=42) == "x=42"
| 148 | 20.285714 | 62 | py |
jinja | jinja-main/tests/test_security.py | import pytest
from markupsafe import escape
from jinja2 import Environment
from jinja2.exceptions import SecurityError
from jinja2.exceptions import TemplateRuntimeError
from jinja2.exceptions import TemplateSyntaxError
from jinja2.nodes import EvalContext
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.sandbox import SandboxedEnvironment
from jinja2.sandbox import unsafe
class PrivateStuff:
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return "PrivateStuff"
class PublicStuff:
def bar(self):
return 23
def _foo(self):
return 42
def __repr__(self):
return "PublicStuff"
class TestSandbox:
def test_unsafe(self, env):
env = SandboxedEnvironment()
pytest.raises(
SecurityError, env.from_string("{{ foo.foo() }}").render, foo=PrivateStuff()
)
assert env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()) == "23"
pytest.raises(
SecurityError, env.from_string("{{ foo._foo() }}").render, foo=PublicStuff()
)
assert env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()) == "23"
assert env.from_string("{{ foo.__class__ }}").render(foo=42) == ""
assert env.from_string("{{ foo.func_code }}").render(foo=lambda: None) == ""
# security error comes from __class__ already.
pytest.raises(
SecurityError,
env.from_string("{{ foo.__class__.__subclasses__() }}").render,
foo=42,
)
def test_immutable_environment(self, env):
env = ImmutableSandboxedEnvironment()
pytest.raises(SecurityError, env.from_string("{{ [].append(23) }}").render)
pytest.raises(SecurityError, env.from_string("{{ {1:2}.clear() }}").render)
def test_restricted(self, env):
env = SandboxedEnvironment()
pytest.raises(
TemplateSyntaxError,
env.from_string,
"{% for item.attribute in seq %}...{% endfor %}",
)
pytest.raises(
TemplateSyntaxError,
env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}",
)
def test_template_data(self, env):
env = Environment(autoescape=True)
t = env.from_string(
"{% macro say_hello(name) %}"
"<p>Hello {{ name }}!</p>{% endmacro %}"
'{{ say_hello("<blink>foo</blink>") }}'
)
escaped_out = "<p>Hello <blink>foo</blink>!</p>"
assert t.render() == escaped_out
assert str(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello("<blink>foo</blink>") == escaped_out
assert (
escape(t.module.say_hello(EvalContext(env), "<blink>foo</blink>"))
== escaped_out
)
assert escape(t.module.say_hello("<blink>foo</blink>")) == escaped_out
def test_attr_filter(self, env):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
pytest.raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self, env):
def disable_op(left, right):
raise TemplateRuntimeError("that operator so does not work")
for expr, ctx, rv in ("1 + 2", {}, "3"), ("a + 2", {"a": 2}, "4"):
env = SandboxedEnvironment()
env.binop_table["+"] = disable_op
t = env.from_string(f"{{{{ {expr} }}}}")
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(["+"])
t = env.from_string(f"{{{{ {expr} }}}}")
with pytest.raises(TemplateRuntimeError):
t.render(ctx)
def test_unary_operator_intercepting(self, env):
def disable_op(arg):
raise TemplateRuntimeError("that operator so does not work")
for expr, ctx, rv in ("-1", {}, "-1"), ("-a", {"a": 2}, "-2"):
env = SandboxedEnvironment()
env.unop_table["-"] = disable_op
t = env.from_string(f"{{{{ {expr} }}}}")
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(["-"])
t = env.from_string(f"{{{{ {expr} }}}}")
with pytest.raises(TemplateRuntimeError):
t.render(ctx)
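# Illustrative sketch, not part of the original suite: the interception hooks
# exercised above are also the way to disable expensive operators such as
# ``**`` for untrusted templates.
def _demo_block_pow():
    class NoPowEnvironment(SandboxedEnvironment):
        intercepted_binops = frozenset(["**"])
        def call_binop(self, context, operator, left, right):
            if operator == "**":
                raise SecurityError("the power operator is unavailable")
            return super().call_binop(context, operator, left, right)
    env = NoPowEnvironment()
    with pytest.raises(SecurityError):
        env.from_string("{{ 2 ** 10 }}").render()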
class TestStringFormat:
def test_basic_format_safety(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{0.__class__}b".format(42) }}')
assert t.render() == "ab"
def test_basic_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{0.foo}b".format({"foo": 42}) }}')
assert t.render() == "a42b"
def test_safe_format_safety(self):
env = SandboxedEnvironment()
t = env.from_string('{{ ("a{0.__class__}b{1}"|safe).format(42, "<foo>") }}')
assert t.render() == "ab<foo>"
def test_safe_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string('{{ ("a{0.foo}b{1}"|safe).format({"foo": 42}, "<foo>") }}')
assert t.render() == "a42b<foo>"
def test_empty_braces_format(self):
env = SandboxedEnvironment()
t1 = env.from_string('{{ ("a{}b{}").format("foo", "42")}}')
t2 = env.from_string('{{ ("a{}b{}"|safe).format(42, "<foo>") }}')
assert t1.render() == "afoob42"
assert t2.render() == "a42b<foo>"
class TestStringFormatMap:
def test_basic_format_safety(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{x.__class__}b".format_map({"x":42}) }}')
assert t.render() == "ab"
def test_basic_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{x.foo}b".format_map({"x":{"foo": 42}}) }}')
assert t.render() == "a42b"
def test_safe_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string(
'{{ ("a{x.foo}b{y}"|safe).format_map({"x":{"foo": 42}, "y":"<foo>"}) }}'
)
assert t.render() == "a42b<foo>"
| 6,176 | 34.5 | 88 | py |
jinja | jinja-main/tests/test_runtime.py | import itertools
from jinja2 import Template
from jinja2.runtime import LoopContext
TEST_IDX_TEMPLATE_STR_1 = (
"[{% for i in lst|reverse %}(len={{ loop.length }},"
" revindex={{ loop.revindex }}, index={{ loop.index }}, val={{ i }}){% endfor %}]"
)
TEST_IDX0_TEMPLATE_STR_1 = (
"[{% for i in lst|reverse %}(len={{ loop.length }},"
" revindex0={{ loop.revindex0 }}, index0={{ loop.index0 }}, val={{ i }})"
"{% endfor %}]"
)
def test_loop_idx():
t = Template(TEST_IDX_TEMPLATE_STR_1)
lst = [10]
excepted_render = "[(len=1, revindex=1, index=1, val=10)]"
assert excepted_render == t.render(lst=lst)
def test_loop_idx0():
t = Template(TEST_IDX0_TEMPLATE_STR_1)
lst = [10]
excepted_render = "[(len=1, revindex0=0, index0=0, val=10)]"
assert excepted_render == t.render(lst=lst)
def test_loopcontext0():
in_lst = []
lc = LoopContext(reversed(in_lst), None)
assert lc.length == len(in_lst)
def test_loopcontext1():
in_lst = [10]
lc = LoopContext(reversed(in_lst), None)
assert lc.length == len(in_lst)
def test_loopcontext2():
in_lst = [10, 11]
lc = LoopContext(reversed(in_lst), None)
assert lc.length == len(in_lst)
def test_iterator_not_advanced_early():
t = Template("{% for _, g in gs %}{{ loop.index }} {{ g|list }}\n{% endfor %}")
out = t.render(
gs=itertools.groupby([(1, "a"), (1, "b"), (2, "c"), (3, "d")], lambda x: x[0])
)
# groupby groups depend on the current position of the iterator. If
# it was advanced early, the lists would appear empty.
assert out == "1 [(1, 'a'), (1, 'b')]\n2 [(2, 'c')]\n3 [(3, 'd')]\n"
def test_mock_not_pass_arg_marker():
"""If a callable class has a ``__getattr__`` that returns True-like
values for arbitrary attrs, it should not be incorrectly identified
as a ``pass_context`` function.
"""
class Calc:
def __getattr__(self, item):
return object()
def __call__(self, *args, **kwargs):
return len(args) + len(kwargs)
t = Template("{{ calc() }}")
out = t.render(calc=Calc())
# Would be "1" if context argument was passed.
assert out == "0"
| 2,192 | 27.855263 | 86 | py |
jinja | jinja-main/tests/conftest.py | from pathlib import Path
import pytest
from jinja2 import loaders
from jinja2.environment import Environment
@pytest.fixture
def env():
"""returns a new environment."""
return Environment()
@pytest.fixture
def dict_loader():
"""returns DictLoader"""
return loaders.DictLoader({"justdict.html": "FOO"})
@pytest.fixture
def package_loader():
"""returns PackageLoader initialized from templates"""
return loaders.PackageLoader("res", "templates")
@pytest.fixture
def filesystem_loader():
"""returns FileSystemLoader initialized to res/templates directory"""
here = Path(__file__).parent.resolve()
return loaders.FileSystemLoader(here / "res" / "templates")
@pytest.fixture
def function_loader():
"""returns a FunctionLoader"""
return loaders.FunctionLoader({"justfunction.html": "FOO"}.get)
@pytest.fixture
def choice_loader(dict_loader, package_loader):
"""returns a ChoiceLoader"""
return loaders.ChoiceLoader([dict_loader, package_loader])
@pytest.fixture
def prefix_loader(filesystem_loader, dict_loader):
"""returns a PrefixLoader"""
return loaders.PrefixLoader({"a": filesystem_loader, "b": dict_loader})
| 1,184 | 22.7 | 75 | py |
jinja | jinja-main/tests/test_filters.py | import random
from collections import namedtuple
import pytest
from markupsafe import Markup
from jinja2 import Environment
from jinja2 import StrictUndefined
from jinja2 import TemplateRuntimeError
from jinja2 import UndefinedError
from jinja2.exceptions import TemplateAssertionError
class Magic:
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class Magic2:
def __init__(self, value1, value2):
self.value1 = value1
self.value2 = value2
def __str__(self):
return f"({self.value1},{self.value2})"
class TestFilter:
def test_filter_calling(self, env):
rv = env.call_filter("sum", [1, 2, 3])
assert rv == 6
def test_capitalize(self, env):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == "Foo bar"
def test_center(self, env):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == " foo "
def test_default(self, env):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given="yes") == "no|False|no|yes"
@pytest.mark.parametrize(
"args,expect",
(
("", "[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]"),
("true", "[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]"),
('by="value"', "[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]"),
("reverse=true", "[('c', 2), ('b', 1), ('AB', 3), ('aa', 0)]"),
),
)
def test_dictsort(self, env, args, expect):
t = env.from_string(f"{{{{ foo|dictsort({args}) }}}}")
out = t.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == expect
def test_batch(self, env):
tmpl = env.from_string("{{ foo|batch(3)|list }}|{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]"
)
def test_slice(self, env):
tmpl = env.from_string("{{ foo|slice(3)|list }}|{{ foo|slice(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]"
)
def test_escape(self, env):
tmpl = env.from_string("""{{ '<">&'|escape }}""")
out = tmpl.render()
assert out == "<">&"
@pytest.mark.parametrize(
("chars", "expect"), [(None, "..stays.."), (".", " ..stays"), (" .", "stays")]
)
def test_trim(self, env, chars, expect):
tmpl = env.from_string("{{ foo|trim(chars) }}")
out = tmpl.render(foo=" ..stays..", chars=chars)
assert out == expect
def test_striptags(self, env):
tmpl = env.from_string("""{{ foo|striptags }}""")
out = tmpl.render(
foo=' <p>just a small \n <a href="#">'
"example</a> link</p>\n<p>to a webpage</p> "
"<!-- <p>and some commented stuff</p> -->"
)
assert out == "just a small example link to a webpage"
def test_filesizeformat(self, env):
tmpl = env.from_string(
"{{ 100|filesizeformat }}|"
"{{ 1000|filesizeformat }}|"
"{{ 1000000|filesizeformat }}|"
"{{ 1000000000|filesizeformat }}|"
"{{ 1000000000000|filesizeformat }}|"
"{{ 100|filesizeformat(true) }}|"
"{{ 1000|filesizeformat(true) }}|"
"{{ 1000000|filesizeformat(true) }}|"
"{{ 1000000000|filesizeformat(true) }}|"
"{{ 1000000000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|"
"1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB"
)
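    def _demo_filesizeformat_bases(self, env):
        # Illustrative sketch, not part of the original suite: the boolean
        # flag switches from decimal units (kB, base 1000) to binary units
        # (KiB, base 1024), which is why 1000 stays "1000 Bytes" in binary
        # mode above while 1024 crosses the threshold.
        t = env.from_string("{{ 1024|filesizeformat(true) }}")
        assert t.render() == "1.0 KiB"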
def test_filesizeformat_issue59(self, env):
tmpl = env.from_string(
"{{ 300|filesizeformat }}|"
"{{ 3000|filesizeformat }}|"
"{{ 3000000|filesizeformat }}|"
"{{ 3000000000|filesizeformat }}|"
"{{ 3000000000000|filesizeformat }}|"
"{{ 300|filesizeformat(true) }}|"
"{{ 3000|filesizeformat(true) }}|"
"{{ 3000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|2.9 KiB|2.9 MiB"
)
def test_first(self, env):
tmpl = env.from_string("{{ foo|first }}")
out = tmpl.render(foo=list(range(10)))
assert out == "0"
@pytest.mark.parametrize(
("value", "expect"), (("42", "42.0"), ("abc", "0.0"), ("32.32", "32.32"))
)
def test_float(self, env, value, expect):
t = env.from_string("{{ value|float }}")
assert t.render(value=value) == expect
def test_float_default(self, env):
t = env.from_string("{{ value|float(default=1.0) }}")
assert t.render(value="abc") == "1.0"
def test_format(self, env):
tmpl = env.from_string("{{ '%s|%s'|format('a', 'b') }}")
out = tmpl.render()
assert out == "a|b"
@staticmethod
def _test_indent_multiline_template(env, markup=False):
text = "\n".join(["", "foo bar", '"baz"', ""])
if markup:
text = Markup(text)
t = env.from_string("{{ foo|indent(2, false, false) }}")
        assert t.render(foo=text) == '\n  foo bar\n  "baz"\n'
        t = env.from_string("{{ foo|indent(2, false, true) }}")
        assert t.render(foo=text) == '\n  foo bar\n  "baz"\n  '
        t = env.from_string("{{ foo|indent(2, true, false) }}")
        assert t.render(foo=text) == '  \n  foo bar\n  "baz"\n'
        t = env.from_string("{{ foo|indent(2, true, true) }}")
        assert t.render(foo=text) == '  \n  foo bar\n  "baz"\n  '
def test_indent(self, env):
self._test_indent_multiline_template(env)
t = env.from_string('{{ "jinja"|indent }}')
assert t.render() == "jinja"
t = env.from_string('{{ "jinja"|indent(first=true) }}')
assert t.render() == " jinja"
t = env.from_string('{{ "jinja"|indent(blank=true) }}')
assert t.render() == "jinja"
def test_indent_markup_input(self, env):
"""
Tests cases where the filter input is a Markup type
"""
self._test_indent_multiline_template(env, markup=True)
def test_indent_width_string(self, env):
t = env.from_string("{{ 'jinja\nflask'|indent(width='>>> ', first=True) }}")
assert t.render() == ">>> jinja\n>>> flask"
@pytest.mark.parametrize(
("value", "expect"),
(
("42", "42"),
("abc", "0"),
("32.32", "32"),
("12345678901234567890", "12345678901234567890"),
),
)
def test_int(self, env, value, expect):
t = env.from_string("{{ value|int }}")
assert t.render(value=value) == expect
@pytest.mark.parametrize(
("value", "base", "expect"),
(("0x4d32", 16, "19762"), ("011", 8, "9"), ("0x33Z", 16, "0")),
)
def test_int_base(self, env, value, base, expect):
t = env.from_string("{{ value|int(base=base) }}")
assert t.render(value=value, base=base) == expect
def test_int_default(self, env):
t = env.from_string("{{ value|int(default=1) }}")
assert t.render(value="abc") == "1"
def test_int_special_method(self, env):
class IntIsh:
def __int__(self):
return 42
t = env.from_string("{{ value|int }}")
assert t.render(value=IntIsh()) == "42"
def test_join(self, env):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == "1|2|3"
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == "<foo><span>foo</span>"
def test_join_attribute(self, env):
User = namedtuple("User", "username")
tmpl = env.from_string("""{{ users|join(', ', 'username') }}""")
assert tmpl.render(users=map(User, ["foo", "bar"])) == "foo, bar"
def test_last(self, env):
tmpl = env.from_string("""{{ foo|last }}""")
out = tmpl.render(foo=list(range(10)))
assert out == "9"
def test_length(self, env):
tmpl = env.from_string("""{{ "hello world"|length }}""")
out = tmpl.render()
assert out == "11"
def test_lower(self, env):
tmpl = env.from_string("""{{ "FOO"|lower }}""")
out = tmpl.render()
assert out == "foo"
def test_items(self, env):
d = {i: c for i, c in enumerate("abc")}
tmpl = env.from_string("""{{ d|items|list }}""")
out = tmpl.render(d=d)
assert out == "[(0, 'a'), (1, 'b'), (2, 'c')]"
def test_items_undefined(self, env):
tmpl = env.from_string("""{{ d|items|list }}""")
out = tmpl.render()
assert out == "[]"
def test_pprint(self, env):
from pprint import pformat
tmpl = env.from_string("""{{ data|pprint }}""")
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self, env, request):
# restore the random state when the test ends
state = random.getstate()
request.addfinalizer(lambda: random.setstate(state))
# generate the random values from a known seed
random.seed("jinja")
expected = [random.choice("1234567890") for _ in range(10)]
# check that the random sequence is generated again by a template
# ensures that filter result is not constant folded
random.seed("jinja")
t = env.from_string('{{ "1234567890"|random }}')
for value in expected:
assert t.render() == value
def test_reverse(self, env):
tmpl = env.from_string(
"{{ 'foobar'|reverse|join }}|{{ [1, 2, 3]|reverse|list }}"
)
assert tmpl.render() == "raboof|[3, 2, 1]"
def test_string(self, env):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string("""{{ obj|string }}""")
assert tmpl.render(obj=x) == str(x)
def test_title(self, env):
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "foo's bar"|title }}""")
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "f bar f"|title }}""")
assert tmpl.render() == "F Bar F"
tmpl = env.from_string("""{{ "foo-bar"|title }}""")
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string("""{{ "foo\tbar"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "FOO\tBAR"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "foo (bar)"|title }}""")
assert tmpl.render() == "Foo (Bar)"
tmpl = env.from_string("""{{ "foo {bar}"|title }}""")
assert tmpl.render() == "Foo {Bar}"
tmpl = env.from_string("""{{ "foo [bar]"|title }}""")
assert tmpl.render() == "Foo [Bar]"
tmpl = env.from_string("""{{ "foo <bar>"|title }}""")
assert tmpl.render() == "Foo <Bar>"
class Foo:
def __str__(self):
return "foo-bar"
tmpl = env.from_string("""{{ data|title }}""")
out = tmpl.render(data=Foo())
assert out == "Foo-Bar"
def test_truncate(self, env):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
"{{ smalldata|truncate(15) }}"
)
out = tmpl.render(data="foobar baz bar" * 1000, smalldata="foobar baz bar")
assert out == "foobar baz b>>>|foobar baz>>>|foobar baz bar"
def test_truncate_very_short(self, env):
tmpl = env.from_string(
'{{ "foo bar baz"|truncate(9) }}|{{ "foo bar baz"|truncate(9, true) }}'
)
out = tmpl.render()
assert out == "foo bar baz|foo bar baz"
def test_truncate_end_length(self, env):
tmpl = env.from_string('{{ "Joel is a slug"|truncate(7, true) }}')
out = tmpl.render()
assert out == "Joel..."
def test_upper(self, env):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == "FOO"
def test_urlize(self, env):
tmpl = env.from_string('{{ "foo example.org bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="https://example.org" rel="noopener">' "example.org</a> bar"
)
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="http://www.example.com/" rel="noopener">'
"http://www.example.com/</a> bar"
)
tmpl = env.from_string('{{ "foo mailto:email@example.com bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="mailto:email@example.com">email@example.com</a> bar'
)
tmpl = env.from_string('{{ "foo email@example.com bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="mailto:email@example.com">email@example.com</a> bar'
)
def test_urlize_rel_policy(self):
env = Environment()
env.policies["urlize.rel"] = None
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="http://www.example.com/">http://www.example.com/</a> bar'
)
def test_urlize_target_parameter(self, env):
tmpl = env.from_string(
'{{ "foo http://www.example.com/ bar"|urlize(target="_blank") }}'
)
assert (
tmpl.render()
== 'foo <a href="http://www.example.com/" rel="noopener" target="_blank">'
"http://www.example.com/</a> bar"
)
def test_urlize_extra_schemes_parameter(self, env):
tmpl = env.from_string(
'{{ "foo tel:+1-514-555-1234 ftp://localhost bar"|'
'urlize(extra_schemes=["tel:", "ftp:"]) }}'
)
assert tmpl.render() == (
'foo <a href="tel:+1-514-555-1234" rel="noopener">'
'tel:+1-514-555-1234</a> <a href="ftp://localhost" rel="noopener">'
"ftp://localhost</a> bar"
)
def test_wordcount(self, env):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == "3"
strict_env = Environment(undefined=StrictUndefined)
t = strict_env.from_string("{{ s|wordcount }}")
with pytest.raises(UndefinedError):
t.render()
def test_block(self, env):
tmpl = env.from_string("{% filter lower|escape %}<HEHE>{% endfilter %}")
assert tmpl.render() == "<hehe>"
def test_chaining(self, env):
tmpl = env.from_string("""{{ ['<foo>', '<bar>']|first|upper|escape }}""")
assert tmpl.render() == "<FOO>"
def test_sum(self, env):
tmpl = env.from_string("""{{ [1, 2, 3, 4, 5, 6]|sum }}""")
assert tmpl.render() == "21"
def test_sum_attributes(self, env):
tmpl = env.from_string("""{{ values|sum('value') }}""")
assert tmpl.render(values=[{"value": 23}, {"value": 1}, {"value": 18}]) == "42"
def test_sum_attributes_nested(self, env):
tmpl = env.from_string("""{{ values|sum('real.value') }}""")
assert (
tmpl.render(
values=[
{"real": {"value": 23}},
{"real": {"value": 1}},
{"real": {"value": 18}},
]
)
== "42"
)
def test_sum_attributes_tuple(self, env):
tmpl = env.from_string("""{{ values.items()|sum('1') }}""")
assert tmpl.render(values={"foo": 23, "bar": 1, "baz": 18}) == "42"
def test_abs(self, env):
tmpl = env.from_string("""{{ -1|abs }}|{{ 1|abs }}""")
assert tmpl.render() == "1|1", tmpl.render()
def test_round_positive(self, env):
tmpl = env.from_string(
"{{ 2.7|round }}|{{ 2.1|round }}|"
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}"
)
assert tmpl.render() == "3.0|2.0|2.123|3.0", tmpl.render()
def test_round_negative(self, env):
tmpl = env.from_string(
"{{ 21.3|round(-1)}}|"
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}"
)
assert tmpl.render() == "20.0|30.0|20.0", tmpl.render()
def test_xmlattr(self, env):
tmpl = env.from_string(
"{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}"
)
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
        assert 'blub:blub="&lt;?&gt;"' in out
def test_sort1(self, env):
tmpl = env.from_string("{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}")
assert tmpl.render() == "[1, 2, 3]|[3, 2, 1]"
def test_sort2(self, env):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == "AbcD"
def test_sort3(self, env):
tmpl = env.from_string("""{{ ['foo', 'Bar', 'blah']|sort }}""")
assert tmpl.render() == "['Bar', 'blah', 'foo']"
def test_sort4(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value')|join }}""")
assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == "1234"
def test_sort5(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value.0')|join }}""")
assert tmpl.render(items=map(Magic, [[3], [2], [4], [1]])) == "[1][2][3][4]"
def test_sort6(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value1,value2')|join }}""")
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
)
)
== "(2,1)(2,2)(2,5)(3,1)"
)
def test_sort7(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value2,value1')|join }}""")
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
)
)
== "(2,1)(3,1)(2,2)(2,5)"
)
def test_sort8(self, env):
tmpl = env.from_string(
"""{{ items|sort(attribute='value1.0,value2.0')|join }}"""
)
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]),
[([3], [1]), ([2], [2]), ([2], [1]), ([2], [5])],
)
)
== "([2],[1])([2],[2])([2],[5])([3],[1])"
)
def test_unique(self, env):
t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique) }}')
assert t.render() == "bA"
def test_unique_case_sensitive(self, env):
t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique(true)) }}')
assert t.render() == "bAa"
def test_unique_attribute(self, env):
t = env.from_string("{{ items|unique(attribute='value')|join }}")
assert t.render(items=map(Magic, [3, 2, 4, 1, 2])) == "3241"
@pytest.mark.parametrize(
"source,expect",
(
('{{ ["a", "B"]|min }}', "a"),
('{{ ["a", "B"]|min(case_sensitive=true) }}', "B"),
("{{ []|min }}", ""),
('{{ ["a", "B"]|max }}', "B"),
('{{ ["a", "B"]|max(case_sensitive=true) }}', "a"),
("{{ []|max }}", ""),
),
)
def test_min_max(self, env, source, expect):
t = env.from_string(source)
assert t.render() == expect
@pytest.mark.parametrize(("name", "expect"), [("min", "1"), ("max", "9")])
def test_min_max_attribute(self, env, name, expect):
t = env.from_string("{{ items|" + name + '(attribute="value") }}')
assert t.render(items=map(Magic, [5, 1, 9])) == expect
def test_groupby(self, env):
tmpl = env.from_string(
"""
{%- for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render().split("|") == ["1: 1, 2: 1, 1", "2: 2, 3", "3: 3, 4", ""]
def test_groupby_tuple_index(self, env):
tmpl = env.from_string(
"""
{%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render() == "a:1:2|b:1|"
def test_groupby_multidot(self, env):
Date = namedtuple("Date", "day,month,year")
Article = namedtuple("Article", "title,date")
articles = [
Article("aha", Date(1, 1, 1970)),
Article("interesting", Date(2, 1, 1970)),
Article("really?", Date(3, 1, 1970)),
Article("totally not", Date(1, 1, 1971)),
]
tmpl = env.from_string(
"""
{%- for year, list in articles|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(articles=articles).split("|") == [
"1970[aha][interesting][really?]",
"1971[totally not]",
"",
]
def test_groupby_default(self, env):
tmpl = env.from_string(
"{% for city, items in users|groupby('city', default='NY') %}"
"{{ city }}: {{ items|map(attribute='name')|join(', ') }}\n"
"{% endfor %}"
)
out = tmpl.render(
users=[
{"name": "emma", "city": "NY"},
{"name": "smith", "city": "WA"},
{"name": "john"},
]
)
assert out == "NY: emma, john\nWA: smith\n"
@pytest.mark.parametrize(
("case_sensitive", "expect"),
[
(False, "a: 1, 3\nb: 2\n"),
(True, "A: 3\na: 1\nb: 2\n"),
],
)
def test_groupby_case(self, env, case_sensitive, expect):
tmpl = env.from_string(
"{% for k, vs in data|groupby('k', case_sensitive=cs) %}"
"{{ k }}: {{ vs|join(', ', attribute='v') }}\n"
"{% endfor %}"
)
out = tmpl.render(
data=[{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "A", "v": 3}],
cs=case_sensitive,
)
assert out == expect
def test_filtertag(self, env):
tmpl = env.from_string(
"{% filter upper|replace('FOO', 'foo') %}foobar{% endfilter %}"
)
assert tmpl.render() == "fooBAR"
def test_replace(self, env):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
tmpl = env.from_string('{{ string|replace("<", 42) }}')
assert tmpl.render(string="<foo>") == "42foo>"
tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
        assert tmpl.render(string=Markup("foo")) == "f&gt;x&lt;&gt;x&lt;"
def test_forceescape(self, env):
tmpl = env.from_string("{{ x|forceescape }}")
assert tmpl.render(x=Markup("<div />")) == "<div />"
def test_safe(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == "<div>foo</div>"
tmpl = env.from_string('{{ "<div>foo</div>" }}')
assert tmpl.render() == "<div>foo</div>"
@pytest.mark.parametrize(
("value", "expect"),
[
("Hello, world!", "Hello%2C%20world%21"),
("Hello, world\u203d", "Hello%2C%20world%E2%80%BD"),
({"f": 1}, "f=1"),
([("f", 1), ("z", 2)], "f=1&z=2"),
({"\u203d": 1}, "%E2%80%BD=1"),
({0: 1}, "0=1"),
([("a b/c", "a b/c")], "a+b%2Fc=a+b%2Fc"),
("a b/c", "a%20b/c"),
],
)
def test_urlencode(self, value, expect):
e = Environment(autoescape=True)
t = e.from_string("{{ value|urlencode }}")
assert t.render(value=value) == expect
def test_simple_map(self, env):
env = Environment()
tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
assert tmpl.render() == "6"
def test_map_sum(self, env):
tmpl = env.from_string('{{ [[1,2], [3], [4,5,6]]|map("sum")|list }}')
assert tmpl.render() == "[3, 3, 15]"
def test_attribute_map(self, env):
User = namedtuple("User", "name")
env = Environment()
users = [
User("john"),
User("jane"),
User("mike"),
]
tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
assert tmpl.render(users=users) == "john|jane|mike"
def test_empty_map(self, env):
env = Environment()
tmpl = env.from_string('{{ none|map("upper")|list }}')
assert tmpl.render() == "[]"
def test_map_default(self, env):
Fullname = namedtuple("Fullname", "firstname,lastname")
Firstname = namedtuple("Firstname", "firstname")
env = Environment()
tmpl = env.from_string(
'{{ users|map(attribute="lastname", default="smith")|join(", ") }}'
)
test_list = env.from_string(
'{{ users|map(attribute="lastname", default=["smith","x"])|join(", ") }}'
)
test_str = env.from_string(
'{{ users|map(attribute="lastname", default="")|join(", ") }}'
)
users = [
Fullname("john", "lennon"),
Fullname("jane", "edwards"),
Fullname("jon", None),
Firstname("mike"),
]
assert tmpl.render(users=users) == "lennon, edwards, None, smith"
assert test_list.render(users=users) == "lennon, edwards, None, ['smith', 'x']"
assert test_str.render(users=users) == "lennon, edwards, None, "
def test_simple_select(self, env):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
assert tmpl.render() == "1|3|5"
def test_bool_select(self, env):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
assert tmpl.render() == "1|2|3|4|5"
def test_simple_reject(self, env):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
assert tmpl.render() == "2|4"
def test_bool_reject(self, env):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
assert tmpl.render() == "None|False|0"
def test_simple_select_attr(self, env):
User = namedtuple("User", "name,is_active")
env = Environment()
users = [
User("john", True),
User("jane", True),
User("mike", False),
]
tmpl = env.from_string(
'{{ users|selectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|jane"
def test_simple_reject_attr(self, env):
User = namedtuple("User", "name,is_active")
env = Environment()
users = [
User("john", True),
User("jane", True),
User("mike", False),
]
tmpl = env.from_string(
'{{ users|rejectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "mike"
def test_func_select_attr(self, env):
User = namedtuple("User", "id,name")
env = Environment()
users = [
User(1, "john"),
User(2, "jane"),
User(3, "mike"),
]
tmpl = env.from_string(
'{{ users|selectattr("id", "odd")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|mike"
def test_func_reject_attr(self, env):
User = namedtuple("User", "id,name")
env = Environment()
users = [
User(1, "john"),
User(2, "jane"),
User(3, "mike"),
]
tmpl = env.from_string(
'{{ users|rejectattr("id", "odd")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "jane"
def test_json_dump(self):
env = Environment(autoescape=True)
t = env.from_string("{{ x|tojson }}")
assert t.render(x={"foo": "bar"}) == '{"foo": "bar"}'
assert t.render(x="\"ba&r'") == r'"\"ba\u0026r\u0027"'
assert t.render(x="<bar>") == r'"\u003cbar\u003e"'
def my_dumps(value, **options):
assert options == {"foo": "bar"}
return "42"
env.policies["json.dumps_function"] = my_dumps
env.policies["json.dumps_kwargs"] = {"foo": "bar"}
assert t.render(x=23) == "42"
def test_wordwrap(self, env):
env.newline_sequence = "\n"
t = env.from_string("{{ s|wordwrap(20) }}")
result = t.render(s="Hello!\nThis is Jinja saying something.")
assert result == "Hello!\nThis is Jinja saying\nsomething."
def test_filter_undefined(self, env):
with pytest.raises(TemplateAssertionError, match="No filter named 'f'"):
env.from_string("{{ var|f }}")
def test_filter_undefined_in_if(self, env):
t = env.from_string("{%- if x is defined -%}{{ x|f }}{%- else -%}x{% endif %}")
assert t.render() == "x"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=42)
def test_filter_undefined_in_elif(self, env):
t = env.from_string(
"{%- if x is defined -%}{{ x }}{%- elif y is defined -%}"
"{{ y|f }}{%- else -%}foo{%- endif -%}"
)
assert t.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(y=42)
def test_filter_undefined_in_else(self, env):
t = env.from_string(
"{%- if x is not defined -%}foo{%- else -%}{{ x|f }}{%- endif -%}"
)
assert t.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=42)
def test_filter_undefined_in_nested_if(self, env):
t = env.from_string(
"{%- if x is not defined -%}foo{%- else -%}{%- if y "
"is defined -%}{{ y|f }}{%- endif -%}{{ x }}{%- endif -%}"
)
assert t.render() == "foo"
assert t.render(x=42) == "42"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=24, y=42)
def test_filter_undefined_in_condexpr(self, env):
t1 = env.from_string("{{ x|f if x is defined else 'foo' }}")
t2 = env.from_string("{{ 'foo' if x is not defined else x|f }}")
assert t1.render() == t2.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t1.render(x=42)
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t2.render(x=42)
| 32,359 | 35.940639 | 88 | py |
jinja | jinja-main/tests/test_nativetypes.py | import math
import pytest
from jinja2.exceptions import UndefinedError
from jinja2.nativetypes import NativeEnvironment
from jinja2.nativetypes import NativeTemplate
from jinja2.runtime import Undefined
@pytest.fixture
def env():
return NativeEnvironment()
def test_is_defined_native_return(env):
t = env.from_string("{{ missing is defined }}")
assert not t.render()
def test_undefined_native_return(env):
t = env.from_string("{{ missing }}")
assert isinstance(t.render(), Undefined)
def test_adding_undefined_native_return(env):
t = env.from_string("{{ 3 + missing }}")
with pytest.raises(UndefinedError):
t.render()
def test_cast_int(env):
t = env.from_string("{{ value|int }}")
result = t.render(value="3")
assert isinstance(result, int)
assert result == 3
def test_list_add(env):
t = env.from_string("{{ a + b }}")
result = t.render(a=["a", "b"], b=["c", "d"])
assert isinstance(result, list)
assert result == ["a", "b", "c", "d"]
def test_multi_expression_add(env):
t = env.from_string("{{ a }} + {{ b }}")
result = t.render(a=["a", "b"], b=["c", "d"])
assert not isinstance(result, list)
assert result == "['a', 'b'] + ['c', 'd']"
def test_loops(env):
t = env.from_string("{% for x in value %}{{ x }}{% endfor %}")
result = t.render(value=["a", "b", "c", "d"])
assert isinstance(result, str)
assert result == "abcd"
def test_loops_with_ints(env):
t = env.from_string("{% for x in value %}{{ x }}{% endfor %}")
result = t.render(value=[1, 2, 3, 4])
assert isinstance(result, int)
assert result == 1234
def test_loop_look_alike(env):
t = env.from_string("{% for x in value %}{{ x }}{% endfor %}")
result = t.render(value=[1])
assert isinstance(result, int)
assert result == 1
@pytest.mark.parametrize(
("source", "expect"),
(
("{{ value }}", True),
("{{ value }}", False),
("{{ 1 == 1 }}", True),
("{{ 2 + 2 == 5 }}", False),
("{{ None is none }}", True),
("{{ '' == None }}", False),
),
)
def test_booleans(env, source, expect):
t = env.from_string(source)
result = t.render(value=expect)
assert isinstance(result, bool)
assert result is expect
def test_variable_dunder(env):
t = env.from_string("{{ x.__class__ }}")
result = t.render(x=True)
assert isinstance(result, type)
def test_constant_dunder(env):
t = env.from_string("{{ true.__class__ }}")
result = t.render()
assert isinstance(result, type)
def test_constant_dunder_to_string(env):
t = env.from_string("{{ true.__class__|string }}")
result = t.render()
assert not isinstance(result, type)
assert result in {"<type 'bool'>", "<class 'bool'>"}
def test_string_literal_var(env):
t = env.from_string("[{{ 'all' }}]")
result = t.render()
assert isinstance(result, str)
assert result == "[all]"
def test_string_top_level(env):
t = env.from_string("'Jinja'")
result = t.render()
assert result == "Jinja"
def test_tuple_of_variable_strings(env):
t = env.from_string("'{{ a }}', 'data', '{{ b }}', b'{{ c }}'")
result = t.render(a=1, b=2, c="bytes")
assert isinstance(result, tuple)
assert result == ("1", "data", "2", b"bytes")
def test_concat_strings_with_quotes(env):
t = env.from_string("--host='{{ host }}' --user \"{{ user }}\"")
result = t.render(host="localhost", user="Jinja")
assert result == "--host='localhost' --user \"Jinja\""
def test_no_intermediate_eval(env):
t = env.from_string("0.000{{ a }}")
result = t.render(a=7)
assert isinstance(result, float)
# If intermediate eval happened, 0.000 would render 0.0, then 7
# would be appended, resulting in 0.07.
assert math.isclose(result, 0.0007)
def test_spontaneous_env():
t = NativeTemplate("{{ true }}")
assert isinstance(t.environment, NativeEnvironment)
def test_leading_spaces(env):
t = env.from_string(" {{ True }}")
result = t.render()
assert result == " True"
def test_macro(env):
t = env.from_string("{%- macro x() -%}{{- [1,2] -}}{%- endmacro -%}{{- x()[1] -}}")
result = t.render()
assert result == 2
assert isinstance(result, int)
| 4,275 | 25.233129 | 87 | py |
jinja | jinja-main/tests/test_async.py | import asyncio
import pytest
from jinja2 import ChainableUndefined
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import Template
from jinja2.async_utils import auto_aiter
from jinja2.exceptions import TemplateNotFound
from jinja2.exceptions import TemplatesNotFound
from jinja2.exceptions import UndefinedError
from jinja2.nativetypes import NativeEnvironment
def test_basic_async():
t = Template(
"{% for item in [1, 2, 3] %}[{{ item }}]{% endfor %}", enable_async=True
)
async def func():
return await t.render_async()
rv = asyncio.run(func())
assert rv == "[1][2][3]"
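# enable_async=True compiles the template to a coroutine function: render()
# still works by running the coroutine on a fresh event loop internally,
# while render_async() must be awaited (or driven via asyncio.run, as here).
# A minimal sketch:
def _render_async_demo():
    t = Template("{{ 1 + 1 }}", enable_async=True)
    return asyncio.run(t.render_async())  # -> "2"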
def test_await_on_calls():
t = Template("{{ async_func() + normal_func() }}", enable_async=True)
async def async_func():
return 42
def normal_func():
return 23
async def func():
return await t.render_async(async_func=async_func, normal_func=normal_func)
rv = asyncio.run(func())
assert rv == "65"
def test_await_on_calls_normal_render():
t = Template("{{ async_func() + normal_func() }}", enable_async=True)
async def async_func():
return 42
def normal_func():
return 23
rv = t.render(async_func=async_func, normal_func=normal_func)
assert rv == "65"
def test_await_and_macros():
t = Template(
"{% macro foo(x) %}[{{ x }}][{{ async_func() }}]{% endmacro %}{{ foo(42) }}",
enable_async=True,
)
async def async_func():
return 42
async def func():
return await t.render_async(async_func=async_func)
rv = asyncio.run(func())
assert rv == "[42][42]"
def test_async_blocks():
t = Template(
"{% block foo %}<Test>{% endblock %}{{ self.foo() }}",
enable_async=True,
autoescape=True,
)
async def func():
return await t.render_async()
rv = asyncio.run(func())
assert rv == "<Test><Test>"
def test_async_generate():
t = Template("{% for x in [1, 2, 3] %}{{ x }}{% endfor %}", enable_async=True)
rv = list(t.generate())
assert rv == ["1", "2", "3"]
def test_async_iteration_in_templates():
t = Template("{% for x in rng %}{{ x }}{% endfor %}", enable_async=True)
async def async_iterator():
for item in [1, 2, 3]:
yield item
rv = list(t.generate(rng=async_iterator()))
assert rv == ["1", "2", "3"]
def test_async_iteration_in_templates_extended():
t = Template(
"{% for x in rng %}{{ loop.index0 }}/{{ x }}{% endfor %}", enable_async=True
)
stream = t.generate(rng=auto_aiter(range(1, 4)))
assert next(stream) == "0"
assert "".join(stream) == "/11/22/3"
@pytest.fixture
def test_env_async():
env = Environment(
loader=DictLoader(
dict(
module="{% macro test() %}[{{ foo }}|{{ bar }}]{% endmacro %}",
header="[{{ foo }}|{{ 23 }}]",
o_printer="({{ o }})",
)
),
enable_async=True,
)
env.globals["bar"] = 23
return env
class TestAsyncImports:
def test_context_imports(self, test_env_async):
t = test_env_async.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render(foo=42) == "[|23]"
t = test_env_async.from_string(
'{% import "module" as m without context %}{{ m.test() }}'
)
assert t.render(foo=42) == "[|23]"
t = test_env_async.from_string(
'{% import "module" as m with context %}{{ m.test() }}'
)
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string('{% from "module" import test %}{{ test() }}')
assert t.render(foo=42) == "[|23]"
t = test_env_async.from_string(
'{% from "module" import test without context %}{{ test() }}'
)
assert t.render(foo=42) == "[|23]"
t = test_env_async.from_string(
'{% from "module" import test with context %}{{ test() }}'
)
assert t.render(foo=42) == "[42|23]"
def test_trailing_comma(self, test_env_async):
test_env_async.from_string('{% from "foo" import bar, baz with context %}')
test_env_async.from_string('{% from "foo" import bar, baz, with context %}')
test_env_async.from_string('{% from "foo" import bar, with context %}')
test_env_async.from_string('{% from "foo" import bar, with, context %}')
test_env_async.from_string('{% from "foo" import bar, with with context %}')
def test_exports(self, test_env_async):
coro = test_env_async.from_string(
"""
{% macro toplevel() %}...{% endmacro %}
{% macro __private() %}...{% endmacro %}
{% set variable = 42 %}
{% for item in [1] %}
{% macro notthere() %}{% endmacro %}
{% endfor %}
"""
)._get_default_module_async()
m = asyncio.run(coro)
assert asyncio.run(m.toplevel()) == "..."
assert not hasattr(m, "__missing")
assert m.variable == 42
assert not hasattr(m, "notthere")
def test_import_with_globals(self, test_env_async):
t = test_env_async.from_string(
'{% import "module" as m %}{{ m.test() }}', globals={"foo": 42}
)
assert t.render() == "[42|23]"
t = test_env_async.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render() == "[|23]"
def test_import_with_globals_override(self, test_env_async):
t = test_env_async.from_string(
'{% set foo = 41 %}{% import "module" as m %}{{ m.test() }}',
globals={"foo": 42},
)
assert t.render() == "[42|23]"
def test_from_import_with_globals(self, test_env_async):
t = test_env_async.from_string(
'{% from "module" import test %}{{ test() }}',
globals={"foo": 42},
)
assert t.render() == "[42|23]"
class TestAsyncIncludes:
def test_context_include(self, test_env_async):
t = test_env_async.from_string('{% include "header" %}')
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string('{% include "header" with context %}')
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string('{% include "header" without context %}')
assert t.render(foo=42) == "[|23]"
def test_choice_includes(self, test_env_async):
t = test_env_async.from_string('{% include ["missing", "header"] %}')
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string(
'{% include ["missing", "missing2"] ignore missing %}'
)
assert t.render(foo=42) == ""
t = test_env_async.from_string('{% include ["missing", "missing2"] %}')
pytest.raises(TemplateNotFound, t.render)
with pytest.raises(TemplatesNotFound) as e:
t.render()
assert e.value.templates == ["missing", "missing2"]
assert e.value.name == "missing2"
def test_includes(t, **ctx):
ctx["foo"] = 42
assert t.render(ctx) == "[42|23]"
t = test_env_async.from_string('{% include ["missing", "header"] %}')
test_includes(t)
t = test_env_async.from_string("{% include x %}")
test_includes(t, x=["missing", "header"])
t = test_env_async.from_string('{% include [x, "header"] %}')
test_includes(t, x="missing")
t = test_env_async.from_string("{% include x %}")
test_includes(t, x="header")
t = test_env_async.from_string("{% include [x] %}")
test_includes(t, x="header")
def test_include_ignoring_missing(self, test_env_async):
t = test_env_async.from_string('{% include "missing" %}')
pytest.raises(TemplateNotFound, t.render)
for extra in "", "with context", "without context":
t = test_env_async.from_string(
'{% include "missing" ignore missing ' + extra + " %}"
)
assert t.render() == ""
def test_context_include_with_overrides(self, test_env_async):
env = Environment(
loader=DictLoader(
dict(
main="{% for item in [1, 2, 3] %}{% include 'item' %}{% endfor %}",
item="{{ item }}",
)
)
)
assert env.get_template("main").render() == "123"
def test_unoptimized_scopes(self, test_env_async):
t = test_env_async.from_string(
"""
{% macro outer(o) %}
{% macro inner() %}
{% include "o_printer" %}
{% endmacro %}
{{ inner() }}
{% endmacro %}
{{ outer("FOO") }}
"""
)
assert t.render().strip() == "(FOO)"
def test_unoptimized_scopes_autoescape(self):
env = Environment(
loader=DictLoader({"o_printer": "({{ o }})"}),
autoescape=True,
enable_async=True,
)
t = env.from_string(
"""
{% macro outer(o) %}
{% macro inner() %}
{% include "o_printer" %}
{% endmacro %}
{{ inner() }}
{% endmacro %}
{{ outer("FOO") }}
"""
)
assert t.render().strip() == "(FOO)"
class TestAsyncForLoop:
def test_simple(self, test_env_async):
tmpl = test_env_async.from_string("{% for item in seq %}{{ item }}{% endfor %}")
assert tmpl.render(seq=list(range(10))) == "0123456789"
def test_else(self, test_env_async):
tmpl = test_env_async.from_string(
"{% for item in seq %}XXX{% else %}...{% endfor %}"
)
assert tmpl.render() == "..."
def test_empty_blocks(self, test_env_async):
tmpl = test_env_async.from_string(
"<{% for item in seq %}{% else %}{% endfor %}>"
)
assert tmpl.render() == "<>"
@pytest.mark.parametrize(
"transform", [lambda x: x, iter, reversed, lambda x: (i for i in x), auto_aiter]
)
def test_context_vars(self, test_env_async, transform):
t = test_env_async.from_string(
"{% for item in seq %}{{ loop.index }}|{{ loop.index0 }}"
"|{{ loop.revindex }}|{{ loop.revindex0 }}|{{ loop.first }}"
"|{{ loop.last }}|{{ loop.length }}\n{% endfor %}"
)
out = t.render(seq=transform([42, 24]))
assert out == "1|0|2|1|True|False|2\n2|1|1|0|False|True|2\n"
def test_cycling(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}"""
)
output = tmpl.render(seq=list(range(4)), through=("<1>", "<2>"))
assert output == "<1><2>" * 4
def test_lookaround(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for item in seq -%}
{{ loop.previtem|default('x') }}-{{ item }}-{{
loop.nextitem|default('x') }}|
{%- endfor %}"""
)
output = tmpl.render(seq=list(range(4)))
assert output == "x-0-1|0-1-2|1-2-3|2-3-x|"
def test_changed(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for item in seq -%}
{{ loop.changed(item) }},
{%- endfor %}"""
)
output = tmpl.render(seq=[None, None, 1, 2, 2, 3, 4, 4, 4])
assert output == "True,False,True,True,False,True,True,False,False,"
def test_scope(self, test_env_async):
tmpl = test_env_async.from_string("{% for item in seq %}{% endfor %}{{ item }}")
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self, test_env_async):
def inner():
yield from range(5)
tmpl = test_env_async.from_string(
"{% for item in iter %}{{ item }}{% endfor %}"
)
output = tmpl.render(iter=inner())
assert output == "01234"
def test_noniter(self, test_env_async):
tmpl = test_env_async.from_string("{% for item in none %}...{% endfor %}")
pytest.raises(TypeError, tmpl.render)
def test_recursive(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[1<[1][2]>][2<[1][2]>][3<[a]>]"
)
def test_recursive_lookaround(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for item in seq recursive -%}
[{{ loop.previtem.a if loop.previtem is defined else 'x' }}.{{
item.a }}.{{ loop.nextitem.a if loop.nextitem is defined else 'x'
}}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[x.1.2<[x.1.2][1.2.x]>][1.2.3<[x.1.2][1.2.x]>][2.3.x<[x.a.x]>]"
)
def test_recursive_depth0(self, test_env_async):
tmpl = test_env_async.from_string(
"{% for item in seq recursive %}[{{ loop.depth0 }}:{{ item.a }}"
"{% if item.b %}<{{ loop(item.b) }}>{% endif %}]{% endfor %}"
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]"
)
def test_recursive_depth(self, test_env_async):
tmpl = test_env_async.from_string(
"{% for item in seq recursive %}[{{ loop.depth }}:{{ item.a }}"
"{% if item.b %}<{{ loop(item.b) }}>{% endif %}]{% endfor %}"
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]"
)
def test_looploop(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}"""
)
assert tmpl.render(table=["ab", "cd"]) == "[1|1][1|2][2|1][2|2]"
def test_reversed_bug(self, test_env_async):
tmpl = test_env_async.from_string(
"{% for i in items %}{{ i }}"
"{% if not loop.last %}"
",{% endif %}{% endfor %}"
)
assert tmpl.render(items=reversed([3, 2, 1])) == "1,2,3"
def test_loop_errors(self, test_env_async):
tmpl = test_env_async.from_string(
"""{% for item in [1] if loop.index
== 0 %}...{% endfor %}"""
)
pytest.raises(UndefinedError, tmpl.render)
tmpl = test_env_async.from_string(
"""{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}"""
)
assert tmpl.render() == ""
def test_loop_filter(self, test_env_async):
tmpl = test_env_async.from_string(
"{% for item in range(10) if item is even %}[{{ item }}]{% endfor %}"
)
assert tmpl.render() == "[0][2][4][6][8]"
tmpl = test_env_async.from_string(
"""
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}"""
)
assert tmpl.render() == "[1:0][2:2][3:4][4:6][5:8]"
def test_scoped_special_var(self, test_env_async):
t = test_env_async.from_string(
"{% for s in seq %}[{{ loop.first }}{% for c in s %}"
"|{{ loop.first }}{% endfor %}]{% endfor %}"
)
assert t.render(seq=("ab", "cd")) == "[True|True|False][False|True|False]"
def test_scoped_loop_var(self, test_env_async):
t = test_env_async.from_string(
"{% for x in seq %}{{ loop.first }}"
"{% for y in seq %}{% endfor %}{% endfor %}"
)
assert t.render(seq="ab") == "TrueFalse"
t = test_env_async.from_string(
"{% for x in seq %}{% for y in seq %}"
"{{ loop.first }}{% endfor %}{% endfor %}"
)
assert t.render(seq="ab") == "TrueFalseTrueFalse"
def test_recursive_empty_loop_iter(self, test_env_async):
t = test_env_async.from_string(
"""
{%- for item in foo recursive -%}{%- endfor -%}
"""
)
assert t.render(dict(foo=[])) == ""
def test_call_in_loop(self, test_env_async):
t = test_env_async.from_string(
"""
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
"""
)
assert t.render() == "[1][2][3]"
def test_scoping_bug(self, test_env_async):
t = test_env_async.from_string(
"""
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
"""
)
assert t.render(foo=(1,)) == "...1......2..."
def test_unpacking(self, test_env_async):
tmpl = test_env_async.from_string(
"{% for a, b, c in [[1, 2, 3]] %}{{ a }}|{{ b }}|{{ c }}{% endfor %}"
)
assert tmpl.render() == "1|2|3"
def test_recursive_loop_filter(self, test_env_async):
t = test_env_async.from_string(
"""
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{%- for page in [site.root] if page.url != this recursive %}
<url><loc>{{ page.url }}</loc></url>
{{- loop(page.children) }}
{%- endfor %}
</urlset>
"""
)
sm = t.render(
this="/foo",
site={"root": {"url": "/", "children": [{"url": "/foo"}, {"url": "/bar"}]}},
)
lines = [x.strip() for x in sm.splitlines() if x.strip()]
assert lines == [
'<?xml version="1.0" encoding="UTF-8"?>',
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">',
"<url><loc>/</loc></url>",
"<url><loc>/bar</loc></url>",
"</urlset>",
]
def test_nonrecursive_loop_filter(self, test_env_async):
t = test_env_async.from_string(
"""
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{%- for page in items if page.url != this %}
<url><loc>{{ page.url }}</loc></url>
{%- endfor %}
</urlset>
"""
)
sm = t.render(
this="/foo", items=[{"url": "/"}, {"url": "/foo"}, {"url": "/bar"}]
)
lines = [x.strip() for x in sm.splitlines() if x.strip()]
assert lines == [
'<?xml version="1.0" encoding="UTF-8"?>',
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">',
"<url><loc>/</loc></url>",
"<url><loc>/bar</loc></url>",
"</urlset>",
]
def test_bare_async(self, test_env_async):
t = test_env_async.from_string('{% extends "header" %}')
assert t.render(foo=42) == "[42|23]"
def test_awaitable_property_slicing(self, test_env_async):
t = test_env_async.from_string("{% for x in a.b[:1] %}{{ x }}{% endfor %}")
assert t.render(a=dict(b=[1, 2, 3])) == "1"
def test_namespace_awaitable(test_env_async):
async def _test():
t = test_env_async.from_string(
'{% set ns = namespace(foo="Bar") %}{{ ns.foo }}'
)
actual = await t.render_async()
assert actual == "Bar"
asyncio.run(_test())
def test_chainable_undefined_aiter():
async def _test():
t = Template(
"{% for x in a['b']['c'] %}{{ x }}{% endfor %}",
enable_async=True,
undefined=ChainableUndefined,
)
rv = await t.render_async(a={})
assert rv == ""
asyncio.run(_test())
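# ChainableUndefined differs from the default Undefined in that attribute and
# item access return another undefined value instead of raising, so the chain
# a["b"]["c"] above degrades to an empty-iterating undefined and the loop
# renders "".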
@pytest.fixture
def async_native_env():
return NativeEnvironment(enable_async=True)
def test_native_async(async_native_env):
async def _test():
t = async_native_env.from_string("{{ x }}")
rv = await t.render_async(x=23)
assert rv == 23
asyncio.run(_test())
def test_native_list_async(async_native_env):
async def _test():
t = async_native_env.from_string("{{ x }}")
rv = await t.render_async(x=list(range(3)))
assert rv == [0, 1, 2]
asyncio.run(_test())
def test_getitem_after_filter():
env = Environment(enable_async=True)
env.filters["add_each"] = lambda v, x: [i + x for i in v]
t = env.from_string("{{ (a|add_each(2))[1:] }}")
out = t.render(a=range(3))
assert out == "[3, 4]"
def test_getitem_after_call():
env = Environment(enable_async=True)
env.globals["add_each"] = lambda v, x: [i + x for i in v]
t = env.from_string("{{ add_each(a, 2)[1:] }}")
out = t.render(a=range(3))
assert out == "[3, 4]"
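# The two tests above pin down an async-mode subtlety: filter and call
# results may still be coroutines when the subscript is compiled, so the
# generated code has to await the value before applying [1:]; slicing a
# bare coroutine object would raise TypeError.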
| 22,225 | 32.624811 | 88 | py |
jinja | jinja-main/tests/test_imports.py | import pytest
from jinja2.environment import Environment
from jinja2.exceptions import TemplateNotFound
from jinja2.exceptions import TemplatesNotFound
from jinja2.exceptions import TemplateSyntaxError
from jinja2.exceptions import UndefinedError
from jinja2.loaders import DictLoader
@pytest.fixture
def test_env():
env = Environment(
loader=DictLoader(
dict(
module="{% macro test() %}[{{ foo }}|{{ bar }}]{% endmacro %}",
header="[{{ foo }}|{{ 23 }}]",
o_printer="({{ o }})",
)
)
)
env.globals["bar"] = 23
return env
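# Context visibility rules exercised below: {% import %} and {% from %}
# default to "without context", so the imported macro sees only globals
# (bar=23) and renders foo empty; adding "with context" passes the importing
# template's render variables (foo=42) along as well.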
class TestImports:
def test_context_imports(self, test_env):
t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% import "module" as m without context %}{{ m.test() }}'
)
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% import "module" as m with context %}{{ m.test() }}'
)
assert t.render(foo=42) == "[42|23]"
t = test_env.from_string('{% from "module" import test %}{{ test() }}')
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% from "module" import test without context %}{{ test() }}'
)
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% from "module" import test with context %}{{ test() }}'
)
assert t.render(foo=42) == "[42|23]"
def test_import_needs_name(self, test_env):
test_env.from_string('{% from "foo" import bar %}')
test_env.from_string('{% from "foo" import bar, baz %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import %}')
def test_no_trailing_comma(self, test_env):
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar, %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar,, %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import, %}')
def test_trailing_comma_with_context(self, test_env):
test_env.from_string('{% from "foo" import bar, baz with context %}')
test_env.from_string('{% from "foo" import bar, baz, with context %}')
test_env.from_string('{% from "foo" import bar, with context %}')
test_env.from_string('{% from "foo" import bar, with, context %}')
test_env.from_string('{% from "foo" import bar, with with context %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar,, with context %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar with context, %}')
def test_exports(self, test_env):
m = test_env.from_string(
"""
{% macro toplevel() %}...{% endmacro %}
{% macro __private() %}...{% endmacro %}
{% set variable = 42 %}
{% for item in [1] %}
{% macro notthere() %}{% endmacro %}
{% endfor %}
"""
).module
assert m.toplevel() == "..."
assert not hasattr(m, "__missing")
assert m.variable == 42
assert not hasattr(m, "notthere")
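    # Export rules pinned down above: a template's module exposes top-level
    # macros and {% set %} assignments, while names starting with an
    # underscore and anything defined inside a loop body (a nested scope)
    # stay private.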
def test_not_exported(self, test_env):
t = test_env.from_string("{% from 'module' import nothing %}{{ nothing() }}")
with pytest.raises(UndefinedError, match="does not export the requested name"):
t.render()
def test_import_with_globals(self, test_env):
t = test_env.from_string(
'{% import "module" as m %}{{ m.test() }}', globals={"foo": 42}
)
assert t.render() == "[42|23]"
t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render() == "[|23]"
def test_import_with_globals_override(self, test_env):
t = test_env.from_string(
'{% set foo = 41 %}{% import "module" as m %}{{ m.test() }}',
globals={"foo": 42},
)
assert t.render() == "[42|23]"
def test_from_import_with_globals(self, test_env):
t = test_env.from_string(
'{% from "module" import test %}{{ test() }}',
globals={"foo": 42},
)
assert t.render() == "[42|23]"
class TestIncludes:
def test_context_include(self, test_env):
t = test_env.from_string('{% include "header" %}')
assert t.render(foo=42) == "[42|23]"
t = test_env.from_string('{% include "header" with context %}')
assert t.render(foo=42) == "[42|23]"
t = test_env.from_string('{% include "header" without context %}')
assert t.render(foo=42) == "[|23]"
def test_choice_includes(self, test_env):
t = test_env.from_string('{% include ["missing", "header"] %}')
assert t.render(foo=42) == "[42|23]"
t = test_env.from_string('{% include ["missing", "missing2"] ignore missing %}')
assert t.render(foo=42) == ""
t = test_env.from_string('{% include ["missing", "missing2"] %}')
pytest.raises(TemplateNotFound, t.render)
with pytest.raises(TemplatesNotFound) as e:
t.render()
assert e.value.templates == ["missing", "missing2"]
assert e.value.name == "missing2"
def test_includes(t, **ctx):
ctx["foo"] = 42
assert t.render(ctx) == "[42|23]"
t = test_env.from_string('{% include ["missing", "header"] %}')
test_includes(t)
t = test_env.from_string("{% include x %}")
test_includes(t, x=["missing", "header"])
t = test_env.from_string('{% include [x, "header"] %}')
test_includes(t, x="missing")
t = test_env.from_string("{% include x %}")
test_includes(t, x="header")
t = test_env.from_string("{% include [x] %}")
test_includes(t, x="header")
def test_include_ignoring_missing(self, test_env):
t = test_env.from_string('{% include "missing" %}')
pytest.raises(TemplateNotFound, t.render)
for extra in "", "with context", "without context":
t = test_env.from_string(
'{% include "missing" ignore missing ' + extra + " %}"
)
assert t.render() == ""
def test_context_include_with_overrides(self, test_env):
env = Environment(
loader=DictLoader(
dict(
main="{% for item in [1, 2, 3] %}{% include 'item' %}{% endfor %}",
item="{{ item }}",
)
)
)
assert env.get_template("main").render() == "123"
def test_unoptimized_scopes(self, test_env):
t = test_env.from_string(
"""
{% macro outer(o) %}
{% macro inner() %}
{% include "o_printer" %}
{% endmacro %}
{{ inner() }}
{% endmacro %}
{{ outer("FOO") }}
"""
)
assert t.render().strip() == "(FOO)"
def test_import_from_with_context(self):
env = Environment(
loader=DictLoader({"a": "{% macro x() %}{{ foobar }}{% endmacro %}"})
)
t = env.from_string(
"{% set foobar = 42 %}{% from 'a' import x with context %}{{ x() }}"
)
assert t.render() == "42"
| 7,571 | 35.757282 | 88 | py |
jinja | jinja-main/tests/test_loader.py | import importlib.abc
import importlib.machinery
import importlib.util
import os
import platform
import shutil
import sys
import tempfile
import time
import weakref
from pathlib import Path
import pytest
from jinja2 import Environment
from jinja2 import loaders
from jinja2 import PackageLoader
from jinja2.exceptions import TemplateNotFound
from jinja2.loaders import split_template_path
class TestLoaders:
def test_dict_loader(self, dict_loader):
env = Environment(loader=dict_loader)
tmpl = env.get_template("justdict.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_package_loader(self, package_loader):
env = Environment(loader=package_loader)
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_filesystem_loader_overlapping_names(self, filesystem_loader):
t2_dir = Path(filesystem_loader.searchpath[0]) / ".." / "templates2"
# Make "foo" show up before "foo/test.html".
filesystem_loader.searchpath.insert(0, t2_dir)
e = Environment(loader=filesystem_loader)
e.get_template("foo")
# This would raise NotADirectoryError if "t2/foo" wasn't skipped.
e.get_template("foo/test.html")
def test_choice_loader(self, choice_loader):
env = Environment(loader=choice_loader)
tmpl = env.get_template("justdict.html")
assert tmpl.render().strip() == "FOO"
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_function_loader(self, function_loader):
env = Environment(loader=function_loader)
tmpl = env.get_template("justfunction.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_prefix_loader(self, prefix_loader):
env = Environment(loader=prefix_loader)
tmpl = env.get_template("a/test.html")
assert tmpl.render().strip() == "BAR"
tmpl = env.get_template("b/justdict.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing")
def test_caching(self):
changed = False
class TestLoader(loaders.BaseLoader):
def get_source(self, environment, template):
return "foo", None, lambda: not changed
env = Environment(loader=TestLoader(), cache_size=-1)
tmpl = env.get_template("template")
assert tmpl is env.get_template("template")
changed = True
assert tmpl is not env.get_template("template")
changed = False
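    # The inline TestLoader above demonstrates the BaseLoader contract:
    # get_source() returns a (source, filename, uptodate) triple, and the
    # template cache calls uptodate() to decide whether a compiled template
    # may be reused. A minimal standalone loader following the same contract
    # (an illustrative sketch, not exercised by these tests):
    #
    #     class MemoryLoader(loaders.BaseLoader):
    #         def __init__(self, mapping):
    #             self.mapping = mapping
    #
    #         def get_source(self, environment, template):
    #             try:
    #                 return self.mapping[template], None, lambda: True
    #             except KeyError:
    #                 raise TemplateNotFound(template)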
def test_no_cache(self):
mapping = {"foo": "one"}
env = Environment(loader=loaders.DictLoader(mapping), cache_size=0)
assert env.get_template("foo") is not env.get_template("foo")
def test_limited_size_cache(self):
mapping = {"one": "foo", "two": "bar", "three": "baz"}
loader = loaders.DictLoader(mapping)
env = Environment(loader=loader, cache_size=2)
t1 = env.get_template("one")
t2 = env.get_template("two")
assert t2 is env.get_template("two")
assert t1 is env.get_template("one")
env.get_template("three")
loader_ref = weakref.ref(loader)
assert (loader_ref, "one") in env.cache
assert (loader_ref, "two") not in env.cache
assert (loader_ref, "three") in env.cache
def test_cache_loader_change(self):
loader1 = loaders.DictLoader({"foo": "one"})
loader2 = loaders.DictLoader({"foo": "two"})
env = Environment(loader=loader1, cache_size=2)
assert env.get_template("foo").render() == "one"
env.loader = loader2
assert env.get_template("foo").render() == "two"
def test_dict_loader_cache_invalidates(self):
mapping = {"foo": "one"}
env = Environment(loader=loaders.DictLoader(mapping))
assert env.get_template("foo").render() == "one"
mapping["foo"] = "two"
assert env.get_template("foo").render() == "two"
def test_split_template_path(self):
assert split_template_path("foo/bar") == ["foo", "bar"]
assert split_template_path("./foo/bar") == ["foo", "bar"]
pytest.raises(TemplateNotFound, split_template_path, "../foo")
class TestFileSystemLoader:
searchpath = (Path(__file__) / ".." / "res" / "templates").resolve()
@staticmethod
def _test_common(env):
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
tmpl = env.get_template("foo/test.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_searchpath_as_str(self):
filesystem_loader = loaders.FileSystemLoader(str(self.searchpath))
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_searchpath_as_pathlib(self):
filesystem_loader = loaders.FileSystemLoader(self.searchpath)
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_searchpath_as_list_including_pathlib(self):
filesystem_loader = loaders.FileSystemLoader(
["/tmp/templates", self.searchpath]
)
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_caches_template_based_on_mtime(self):
filesystem_loader = loaders.FileSystemLoader(self.searchpath)
env = Environment(loader=filesystem_loader)
tmpl1 = env.get_template("test.html")
tmpl2 = env.get_template("test.html")
assert tmpl1 is tmpl2
os.utime(self.searchpath / "test.html", (time.time(), time.time()))
tmpl3 = env.get_template("test.html")
assert tmpl1 is not tmpl3
@pytest.mark.parametrize(
("encoding", "expect"),
[
("utf-8", "文字化け"),
("iso-8859-1", "æ\x96\x87\xe5\xad\x97\xe5\x8c\x96\xe3\x81\x91"),
],
)
def test_uses_specified_encoding(self, encoding, expect):
loader = loaders.FileSystemLoader(self.searchpath, encoding=encoding)
e = Environment(loader=loader)
t = e.get_template("mojibake.txt")
assert t.render() == expect
def test_filename_normpath(self):
"""Nested template names should only contain ``os.sep`` in the
loaded filename.
"""
loader = loaders.FileSystemLoader(self.searchpath)
e = Environment(loader=loader)
t = e.get_template("foo/test.html")
assert t.filename == str(self.searchpath / "foo" / "test.html")
class TestModuleLoader:
archive = None
mod_env = None
def compile_down(self, prefix_loader, zip="deflated"):
log = []
self.reg_env = Environment(loader=prefix_loader)
if zip is not None:
fd, self.archive = tempfile.mkstemp(suffix=".zip")
os.close(fd)
else:
self.archive = tempfile.mkdtemp()
self.reg_env.compile_templates(self.archive, zip=zip, log_function=log.append)
self.mod_env = Environment(loader=loaders.ModuleLoader(self.archive))
return "".join(log)
def teardown_method(self):
if self.archive is not None:
if os.path.isfile(self.archive):
os.remove(self.archive)
else:
shutil.rmtree(self.archive)
self.archive = None
self.mod_env = None
def test_log(self, prefix_loader):
log = self.compile_down(prefix_loader)
assert (
'Compiled "a/foo/test.html" as '
"tmpl_a790caf9d669e39ea4d280d597ec891c4ef0404a" in log
)
assert "Finished compiling templates" in log
assert (
'Could not compile "a/syntaxerror.html": '
"Encountered unknown tag 'endif'" in log
)
def _test_common(self):
tmpl1 = self.reg_env.get_template("a/test.html")
tmpl2 = self.mod_env.get_template("a/test.html")
assert tmpl1.render() == tmpl2.render()
tmpl1 = self.reg_env.get_template("b/justdict.html")
tmpl2 = self.mod_env.get_template("b/justdict.html")
assert tmpl1.render() == tmpl2.render()
def test_deflated_zip_compile(self, prefix_loader):
self.compile_down(prefix_loader, zip="deflated")
self._test_common()
def test_stored_zip_compile(self, prefix_loader):
self.compile_down(prefix_loader, zip="stored")
self._test_common()
def test_filesystem_compile(self, prefix_loader):
self.compile_down(prefix_loader, zip=None)
self._test_common()
def test_weak_references(self, prefix_loader):
self.compile_down(prefix_loader)
self.mod_env.get_template("a/test.html")
key = loaders.ModuleLoader.get_template_key("a/test.html")
name = self.mod_env.loader.module.__name__
assert hasattr(self.mod_env.loader.module, key)
assert name in sys.modules
# unset all, ensure the module is gone from sys.modules
self.mod_env = None
try:
import gc
gc.collect()
except BaseException:
pass
assert name not in sys.modules
def test_choice_loader(self, prefix_loader):
self.compile_down(prefix_loader)
self.mod_env.loader = loaders.ChoiceLoader(
[self.mod_env.loader, loaders.DictLoader({"DICT_SOURCE": "DICT_TEMPLATE"})]
)
tmpl1 = self.mod_env.get_template("a/test.html")
assert tmpl1.render() == "BAR"
tmpl2 = self.mod_env.get_template("DICT_SOURCE")
assert tmpl2.render() == "DICT_TEMPLATE"
def test_prefix_loader(self, prefix_loader):
self.compile_down(prefix_loader)
self.mod_env.loader = loaders.PrefixLoader(
{
"MOD": self.mod_env.loader,
"DICT": loaders.DictLoader({"test.html": "DICT_TEMPLATE"}),
}
)
tmpl1 = self.mod_env.get_template("MOD/a/test.html")
assert tmpl1.render() == "BAR"
tmpl2 = self.mod_env.get_template("DICT/test.html")
assert tmpl2.render() == "DICT_TEMPLATE"
def test_path_as_pathlib(self, prefix_loader):
self.compile_down(prefix_loader)
mod_path = self.mod_env.loader.module.__path__[0]
mod_loader = loaders.ModuleLoader(Path(mod_path))
self.mod_env = Environment(loader=mod_loader)
self._test_common()
def test_supports_pathlib_in_list_of_paths(self, prefix_loader):
self.compile_down(prefix_loader)
mod_path = self.mod_env.loader.module.__path__[0]
mod_loader = loaders.ModuleLoader([Path(mod_path), "/tmp/templates"])
self.mod_env = Environment(loader=mod_loader)
self._test_common()
@pytest.fixture()
def package_dir_loader(monkeypatch):
monkeypatch.syspath_prepend(Path(__file__).parent)
return PackageLoader("res")
@pytest.mark.parametrize(
("template", "expect"), [("foo/test.html", "FOO"), ("test.html", "BAR")]
)
def test_package_dir_source(package_dir_loader, template, expect):
source, name, up_to_date = package_dir_loader.get_source(None, template)
assert source.rstrip() == expect
assert name.endswith(os.path.join(*split_template_path(template)))
assert up_to_date()
def test_package_dir_list(package_dir_loader):
templates = package_dir_loader.list_templates()
assert "foo/test.html" in templates
assert "test.html" in templates
@pytest.fixture()
def package_file_loader(monkeypatch):
monkeypatch.syspath_prepend(Path(__file__).parent / "res")
return PackageLoader("__init__")
@pytest.mark.parametrize(
("template", "expect"), [("foo/test.html", "FOO"), ("test.html", "BAR")]
)
def test_package_file_source(package_file_loader, template, expect):
source, name, up_to_date = package_file_loader.get_source(None, template)
assert source.rstrip() == expect
assert name.endswith(os.path.join(*split_template_path(template)))
assert up_to_date()
def test_package_file_list(package_file_loader):
templates = package_file_loader.list_templates()
assert "foo/test.html" in templates
assert "test.html" in templates
@pytest.fixture()
def package_zip_loader(monkeypatch):
package_zip = (Path(__file__) / ".." / "res" / "package.zip").resolve()
monkeypatch.syspath_prepend(package_zip)
return PackageLoader("t_pack")
@pytest.mark.parametrize(
("template", "expect"), [("foo/test.html", "FOO"), ("test.html", "BAR")]
)
def test_package_zip_source(package_zip_loader, template, expect):
source, name, up_to_date = package_zip_loader.get_source(None, template)
assert source.rstrip() == expect
assert name.endswith(os.path.join(*split_template_path(template)))
assert up_to_date is None
@pytest.mark.xfail(
platform.python_implementation() == "PyPy",
reason="PyPy's zipimporter doesn't have a '_files' attribute.",
raises=TypeError,
)
def test_package_zip_list(package_zip_loader):
assert package_zip_loader.list_templates() == ["foo/test.html", "test.html"]
@pytest.mark.parametrize("package_path", ["", ".", "./"])
def test_package_zip_omit_curdir(package_zip_loader, package_path):
"""PackageLoader should not add or include "." or "./" in the root
    path; those segments are invalid in zip paths.
"""
loader = PackageLoader("t_pack", package_path)
assert loader.package_path == ""
source, _, _ = loader.get_source(None, "templates/foo/test.html")
assert source.rstrip() == "FOO"
def test_pep_451_import_hook():
class ImportHook(importlib.abc.MetaPathFinder, importlib.abc.Loader):
def find_spec(self, name, path=None, target=None):
if name != "res":
return None
spec = importlib.machinery.PathFinder.find_spec(name)
return importlib.util.spec_from_file_location(
name,
spec.origin,
loader=self,
submodule_search_locations=spec.submodule_search_locations,
)
def create_module(self, spec):
return None # default behaviour is fine
def exec_module(self, module):
            # Required by the Loader interface; a no-op here is technically
            # wrong, but these tests never execute the module.
            return None
# ensure we restore `sys.meta_path` after putting in our loader
before = sys.meta_path[:]
try:
sys.meta_path.insert(0, ImportHook())
package_loader = PackageLoader("res")
assert "test.html" in package_loader.list_templates()
finally:
sys.meta_path[:] = before
| 14,874 | 34.843373 | 87 | py |
jinja | jinja-main/tests/test_bytecode_cache.py | import pytest
from jinja2 import Environment
from jinja2.bccache import Bucket
from jinja2.bccache import FileSystemBytecodeCache
from jinja2.bccache import MemcachedBytecodeCache
from jinja2.exceptions import TemplateNotFound
@pytest.fixture
def env(package_loader, tmp_path):
bytecode_cache = FileSystemBytecodeCache(str(tmp_path))
return Environment(loader=package_loader, bytecode_cache=bytecode_cache)
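# FileSystemBytecodeCache stores the marshalled bytecode of each compiled
# template as a file under the given directory, so a fresh process can skip
# recompiling unchanged templates; it speeds up compilation only, rendering
# is unaffected.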
class TestByteCodeCache:
def test_simple(self, env):
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
class MockMemcached:
class Error(Exception):
pass
key = None
value = None
timeout = None
def get(self, key):
return self.value
def set(self, key, value, timeout=None):
self.key = key
self.value = value
self.timeout = timeout
def get_side_effect(self, key):
raise self.Error()
def set_side_effect(self, *args):
raise self.Error()
class TestMemcachedBytecodeCache:
def test_dump_load(self):
memcached = MockMemcached()
m = MemcachedBytecodeCache(memcached)
b = Bucket(None, "key", "")
b.code = "code"
m.dump_bytecode(b)
assert memcached.key == "jinja2/bytecode/key"
b = Bucket(None, "key", "")
m.load_bytecode(b)
assert b.code == "code"
def test_exception(self):
memcached = MockMemcached()
memcached.get = memcached.get_side_effect
memcached.set = memcached.set_side_effect
m = MemcachedBytecodeCache(memcached)
b = Bucket(None, "key", "")
b.code = "code"
m.dump_bytecode(b)
m.load_bytecode(b)
m.ignore_memcache_errors = False
with pytest.raises(MockMemcached.Error):
m.dump_bytecode(b)
with pytest.raises(MockMemcached.Error):
m.load_bytecode(b)
| 1,984 | 24.448718 | 76 | py |
jinja | jinja-main/tests/test_compile.py | import os
import re
from jinja2.environment import Environment
from jinja2.loaders import DictLoader
def test_filters_deterministic(tmp_path):
src = "".join(f"{{{{ {i}|filter{i} }}}}" for i in range(10))
env = Environment(loader=DictLoader({"foo": src}))
env.filters.update(dict.fromkeys((f"filter{i}" for i in range(10)), lambda: None))
env.compile_templates(tmp_path, zip=None)
name = os.listdir(tmp_path)[0]
content = (tmp_path / name).read_text("utf8")
expect = [f"filters['filter{i}']" for i in range(10)]
found = re.findall(r"filters\['filter\d']", content)
assert found == expect
def test_import_as_with_context_deterministic(tmp_path):
src = "\n".join(f'{{% import "bar" as bar{i} with context %}}' for i in range(10))
env = Environment(loader=DictLoader({"foo": src}))
env.compile_templates(tmp_path, zip=None)
name = os.listdir(tmp_path)[0]
content = (tmp_path / name).read_text("utf8")
expect = [f"'bar{i}': " for i in range(10)]
found = re.findall(r"'bar\d': ", content)[:10]
assert found == expect
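# Both tests assert that code generation is deterministic: filter lookups and
# "import ... with context" bindings must appear in the compiled module in
# source order, so identical templates always compile to identical output
# (important for reproducible compile_templates() artifacts and caches).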
| 1,084 | 36.413793 | 86 | py |
jinja | jinja-main/tests/test_async_filters.py | from collections import namedtuple
import pytest
from markupsafe import Markup
from jinja2 import Environment
from jinja2.async_utils import auto_aiter
async def make_aiter(iter):
for item in iter:
yield item
def mark_dualiter(parameter, factory):
def decorator(f):
return pytest.mark.parametrize(
parameter, [lambda: factory(), lambda: make_aiter(factory())]
)(f)
return decorator
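# mark_dualiter parametrizes each test twice: once with a factory returning
# the plain sync iterable and once with the same items wrapped in an async
# generator, so every filter below is exercised against both iteration
# protocols. The factories are zero-argument callables (hence the foo() /
# items() calls inside the templates) so each run gets a fresh, unconsumed
# iterator.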
@pytest.fixture
def env_async():
return Environment(enable_async=True)
@mark_dualiter("foo", lambda: range(10))
def test_first(env_async, foo):
tmpl = env_async.from_string("{{ foo()|first }}")
out = tmpl.render(foo=foo)
assert out == "0"
@mark_dualiter(
"items",
lambda: [
{"foo": 1, "bar": 2},
{"foo": 2, "bar": 3},
{"foo": 1, "bar": 1},
{"foo": 3, "bar": 4},
],
)
def test_groupby(env_async, items):
tmpl = env_async.from_string(
"""
{%- for grouper, list in items()|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(items=items).split("|") == [
"1: 1, 2: 1, 1",
"2: 2, 3",
"3: 3, 4",
"",
]
@pytest.mark.parametrize(
("case_sensitive", "expect"),
[
(False, "a: 1, 3\nb: 2\n"),
(True, "A: 3\na: 1\nb: 2\n"),
],
)
def test_groupby_case(env_async, case_sensitive, expect):
tmpl = env_async.from_string(
"{% for k, vs in data|groupby('k', case_sensitive=cs) %}"
"{{ k }}: {{ vs|join(', ', attribute='v') }}\n"
"{% endfor %}"
)
out = tmpl.render(
data=[{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "A", "v": 3}],
cs=case_sensitive,
)
assert out == expect
@mark_dualiter("items", lambda: [("a", 1), ("a", 2), ("b", 1)])
def test_groupby_tuple_index(env_async, items):
tmpl = env_async.from_string(
"""
{%- for grouper, list in items()|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(items=items) == "a:1:2|b:1|"
def make_articles():
Date = namedtuple("Date", "day,month,year")
Article = namedtuple("Article", "title,date")
return [
Article("aha", Date(1, 1, 1970)),
Article("interesting", Date(2, 1, 1970)),
Article("really?", Date(3, 1, 1970)),
Article("totally not", Date(1, 1, 1971)),
]
@mark_dualiter("articles", make_articles)
def test_groupby_multidot(env_async, articles):
tmpl = env_async.from_string(
"""
{%- for year, list in articles()|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(articles=articles).split("|") == [
"1970[aha][interesting][really?]",
"1971[totally not]",
"",
]
@mark_dualiter("int_items", lambda: [1, 2, 3])
def test_join_env_int(env_async, int_items):
tmpl = env_async.from_string('{{ items()|join("|") }}')
out = tmpl.render(items=int_items)
assert out == "1|2|3"
@mark_dualiter("string_items", lambda: ["<foo>", Markup("<span>foo</span>")])
def test_join_string_list(string_items):
env2 = Environment(autoescape=True, enable_async=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render(items=string_items) == "<foo><span>foo</span>"
def make_users():
User = namedtuple("User", "username")
return map(User, ["foo", "bar"])
@mark_dualiter("users", make_users)
def test_join_attribute(env_async, users):
tmpl = env_async.from_string("""{{ users()|join(', ', 'username') }}""")
assert tmpl.render(users=users) == "foo, bar"
@mark_dualiter("items", lambda: [1, 2, 3, 4, 5])
def test_simple_reject(env_async, items):
tmpl = env_async.from_string('{{ items()|reject("odd")|join("|") }}')
assert tmpl.render(items=items) == "2|4"
@mark_dualiter("items", lambda: [None, False, 0, 1, 2, 3, 4, 5])
def test_bool_reject(env_async, items):
tmpl = env_async.from_string('{{ items()|reject|join("|") }}')
assert tmpl.render(items=items) == "None|False|0"
@mark_dualiter("items", lambda: [1, 2, 3, 4, 5])
def test_simple_select(env_async, items):
tmpl = env_async.from_string('{{ items()|select("odd")|join("|") }}')
assert tmpl.render(items=items) == "1|3|5"
@mark_dualiter("items", lambda: [None, False, 0, 1, 2, 3, 4, 5])
def test_bool_select(env_async, items):
tmpl = env_async.from_string('{{ items()|select|join("|") }}')
assert tmpl.render(items=items) == "1|2|3|4|5"
def make_users(): # type: ignore
User = namedtuple("User", "name,is_active")
return [
User("john", True),
User("jane", True),
User("mike", False),
]
@mark_dualiter("users", make_users)
def test_simple_select_attr(env_async, users):
tmpl = env_async.from_string(
'{{ users()|selectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|jane"
@mark_dualiter("items", lambda: list("123"))
def test_simple_map(env_async, items):
tmpl = env_async.from_string('{{ items()|map("int")|sum }}')
assert tmpl.render(items=items) == "6"
def test_map_sum(env_async): # async map + async filter
tmpl = env_async.from_string('{{ [[1,2], [3], [4,5,6]]|map("sum")|list }}')
assert tmpl.render() == "[3, 3, 15]"
@mark_dualiter("users", make_users)
def test_attribute_map(env_async, users):
tmpl = env_async.from_string('{{ users()|map(attribute="name")|join("|") }}')
assert tmpl.render(users=users) == "john|jane|mike"
def test_empty_map(env_async):
tmpl = env_async.from_string('{{ none|map("upper")|list }}')
assert tmpl.render() == "[]"
@mark_dualiter("items", lambda: [1, 2, 3, 4, 5, 6])
def test_sum(env_async, items):
tmpl = env_async.from_string("""{{ items()|sum }}""")
assert tmpl.render(items=items) == "21"
@mark_dualiter("items", lambda: [{"value": 23}, {"value": 1}, {"value": 18}])
def test_sum_attributes(env_async, items):
tmpl = env_async.from_string("""{{ items()|sum('value') }}""")
assert tmpl.render(items=items)
def test_sum_attributes_nested(env_async):
tmpl = env_async.from_string("""{{ values|sum('real.value') }}""")
assert (
tmpl.render(
values=[
{"real": {"value": 23}},
{"real": {"value": 1}},
{"real": {"value": 18}},
]
)
== "42"
)
def test_sum_attributes_tuple(env_async):
tmpl = env_async.from_string("""{{ values.items()|sum('1') }}""")
assert tmpl.render(values={"foo": 23, "bar": 1, "baz": 18}) == "42"
@mark_dualiter("items", lambda: range(10))
def test_slice(env_async, items):
tmpl = env_async.from_string(
"{{ items()|slice(3)|list }}|{{ items()|slice(3, 'X')|list }}"
)
out = tmpl.render(items=items)
assert out == (
"[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]"
)
def test_custom_async_filter(env_async):
async def customfilter(val):
return str(val)
env_async.filters["customfilter"] = customfilter
tmpl = env_async.from_string("{{ 'static'|customfilter }} {{ arg|customfilter }}")
out = tmpl.render(arg="dynamic")
assert out == "static dynamic"
@mark_dualiter("items", lambda: range(10))
def test_custom_async_iteratable_filter(env_async, items):
async def customfilter(iterable):
items = []
async for item in auto_aiter(iterable):
items.append(str(item))
if len(items) == 3:
break
return ",".join(items)
env_async.filters["customfilter"] = customfilter
tmpl = env_async.from_string(
"{{ items()|customfilter }} .. {{ [3, 4, 5, 6]|customfilter }}"
)
out = tmpl.render(items=items)
assert out == "0,1,2 .. 3,4,5"
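# Custom async filters, as the two tests above show: a coroutine filter is
# awaited automatically for each value, while a filter that consumes the
# whole iterable can use auto_aiter() to handle sync and async inputs
# uniformly.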
| 8,001 | 28.20438 | 86 | py |
jinja | jinja-main/tests/test_ext.py | import re
from io import BytesIO
import pytest
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import nodes
from jinja2 import pass_context
from jinja2.exceptions import TemplateAssertionError
from jinja2.ext import Extension
from jinja2.lexer import count_newlines
from jinja2.lexer import Token
importable_object = 23
_gettext_re = re.compile(r"_\((.*?)\)", re.DOTALL)
i18n_templates = {
"default.html": '<title>{{ page_title|default(_("missing")) }}</title>'
"{% block body %}{% endblock %}",
"child.html": '{% extends "default.html" %}{% block body %}'
"{% trans %}watch out{% endtrans %}{% endblock %}",
"plural.html": "{% trans user_count %}One user online{% pluralize %}"
"{{ user_count }} users online{% endtrans %}",
"plural2.html": "{% trans user_count=get_user_count() %}{{ user_count }}s"
"{% pluralize %}{{ user_count }}p{% endtrans %}",
"stringformat.html": '{{ _("User: %(num)s")|format(num=user_count) }}',
}
newstyle_i18n_templates = {
"default.html": '<title>{{ page_title|default(_("missing")) }}</title>'
"{% block body %}{% endblock %}",
"child.html": '{% extends "default.html" %}{% block body %}'
"{% trans %}watch out{% endtrans %}{% endblock %}",
"plural.html": "{% trans user_count %}One user online{% pluralize %}"
"{{ user_count }} users online{% endtrans %}",
"stringformat.html": '{{ _("User: %(num)s", num=user_count) }}',
"ngettext.html": '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}',
"ngettext_long.html": "{% trans num=apples %}{{ num }} apple{% pluralize %}"
"{{ num }} apples{% endtrans %}",
"pgettext.html": '{{ pgettext("fruit", "Apple") }}',
"npgettext.html": '{{ npgettext("fruit", "%(num)s apple", "%(num)s apples",'
" apples) }}",
"pgettext_block": "{% trans 'fruit' num=apples %}Apple{% endtrans %}",
"npgettext_block": "{% trans 'fruit' num=apples %}{{ num }} apple"
"{% pluralize %}{{ num }} apples{% endtrans %}",
"transvars1.html": "{% trans %}User: {{ num }}{% endtrans %}",
"transvars2.html": "{% trans num=count %}User: {{ num }}{% endtrans %}",
"transvars3.html": "{% trans count=num %}User: {{ count }}{% endtrans %}",
"novars.html": "{% trans %}%(hello)s{% endtrans %}",
"vars.html": "{% trans %}{{ foo }}%(foo)s{% endtrans %}",
"explicitvars.html": '{% trans foo="42" %}%(foo)s{% endtrans %}',
}
languages = {
"de": {
"missing": "fehlend",
"watch out": "pass auf",
"One user online": "Ein Benutzer online",
"%(user_count)s users online": "%(user_count)s Benutzer online",
"User: %(num)s": "Benutzer: %(num)s",
"User: %(count)s": "Benutzer: %(count)s",
"Apple": {None: "Apfel", "fruit": "Apple"},
"%(num)s apple": {None: "%(num)s Apfel", "fruit": "%(num)s Apple"},
"%(num)s apples": {None: "%(num)s Äpfel", "fruit": "%(num)s Apples"},
}
}
def _get_with_context(value, ctx=None):
if isinstance(value, dict):
return value.get(ctx, value)
return value
@pass_context
def gettext(context, string):
language = context.get("LANGUAGE", "en")
value = languages.get(language, {}).get(string, string)
return _get_with_context(value)
@pass_context
def ngettext(context, s, p, n):
language = context.get("LANGUAGE", "en")
if n != 1:
value = languages.get(language, {}).get(p, p)
return _get_with_context(value)
value = languages.get(language, {}).get(s, s)
return _get_with_context(value)
@pass_context
def pgettext(context, c, s):
language = context.get("LANGUAGE", "en")
value = languages.get(language, {}).get(s, s)
return _get_with_context(value, c)
@pass_context
def npgettext(context, c, s, p, n):
language = context.get("LANGUAGE", "en")
if n != 1:
value = languages.get(language, {}).get(p, p)
return _get_with_context(value, c)
value = languages.get(language, {}).get(s, s)
return _get_with_context(value, c)
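# The four stubs above emulate gettext/ngettext and their context-aware
# pgettext/npgettext variants: catalog values are either plain strings or
# dicts keyed by message context (None meaning "no context"), which
# _get_with_context resolves.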
i18n_env = Environment(
loader=DictLoader(i18n_templates), extensions=["jinja2.ext.i18n"]
)
i18n_env.globals.update(
{
"_": gettext,
"gettext": gettext,
"ngettext": ngettext,
"pgettext": pgettext,
"npgettext": npgettext,
}
)
i18n_env_trimmed = Environment(extensions=["jinja2.ext.i18n"])
i18n_env_trimmed.policies["ext.i18n.trimmed"] = True
i18n_env_trimmed.globals.update(
{
"_": gettext,
"gettext": gettext,
"ngettext": ngettext,
"pgettext": pgettext,
"npgettext": npgettext,
}
)
newstyle_i18n_env = Environment(
loader=DictLoader(newstyle_i18n_templates), extensions=["jinja2.ext.i18n"]
)
newstyle_i18n_env.install_gettext_callables( # type: ignore
gettext, ngettext, newstyle=True, pgettext=pgettext, npgettext=npgettext
)
class ExampleExtension(Extension):
tags = {"test"}
ext_attr = 42
context_reference_node_cls = nodes.ContextReference
def parse(self, parser):
return nodes.Output(
[
self.call_method(
"_dump",
[
nodes.EnvironmentAttribute("sandboxed"),
self.attr("ext_attr"),
nodes.ImportedName(__name__ + ".importable_object"),
self.context_reference_node_cls(),
],
)
]
).set_lineno(next(parser.stream).lineno)
def _dump(self, sandboxed, ext_attr, imported_object, context):
return (
f"{sandboxed}|{ext_attr}|{imported_object}|{context.blocks}"
f"|{context.get('test_var')}"
)
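# A smaller parse() example than ExampleExtension above (an illustrative
# sketch; the {% upper %} tag is an assumption, not part of this suite): a
# block tag whose body is piped through the built-in upper filter.
#
#     class UpperExtension(Extension):
#         tags = {"upper"}
#
#         def parse(self, parser):
#             lineno = next(parser.stream).lineno
#             body = parser.parse_statements(("name:endupper",), drop_needle=True)
#             node = nodes.FilterBlock(
#                 body, nodes.Filter(None, "upper", [], [], None, None)
#             )
#             return node.set_lineno(lineno)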
class DerivedExampleExtension(ExampleExtension):
context_reference_node_cls = nodes.DerivedContextReference # type: ignore
class PreprocessorExtension(Extension):
def preprocess(self, source, name, filename=None):
return source.replace("[[TEST]]", "({{ foo }})")
class StreamFilterExtension(Extension):
def filter_stream(self, stream):
for token in stream:
if token.type == "data":
yield from self.interpolate(token)
else:
yield token
def interpolate(self, token):
pos = 0
end = len(token.value)
lineno = token.lineno
while True:
match = _gettext_re.search(token.value, pos)
if match is None:
break
value = token.value[pos : match.start()]
if value:
yield Token(lineno, "data", value)
lineno += count_newlines(token.value)
yield Token(lineno, "variable_begin", None)
yield Token(lineno, "name", "gettext")
yield Token(lineno, "lparen", None)
yield Token(lineno, "string", match.group(1))
yield Token(lineno, "rparen", None)
yield Token(lineno, "variable_end", None)
pos = match.end()
if pos < end:
yield Token(lineno, "data", token.value[pos:])
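# StreamFilterExtension rewrites the raw token stream before parsing: every
# _(...) occurrence inside plain template data is replaced with the token
# sequence for {{ gettext('...') }}, so "Foo _(bar) Baz" parses as if it had
# been written "Foo {{ gettext('bar') }} Baz".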
class TestExtensions:
def test_extend_late(self):
env = Environment()
t = env.from_string('{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
assert t.render() == "<test>"
def test_loop_controls(self):
env = Environment(extensions=["jinja2.ext.loopcontrols"])
tmpl = env.from_string(
"""
{%- for item in [1, 2, 3, 4] %}
{%- if item % 2 == 0 %}{% continue %}{% endif -%}
{{ item }}
{%- endfor %}"""
)
assert tmpl.render() == "13"
tmpl = env.from_string(
"""
{%- for item in [1, 2, 3, 4] %}
{%- if item > 2 %}{% break %}{% endif -%}
{{ item }}
{%- endfor %}"""
)
assert tmpl.render() == "12"
def test_do(self):
env = Environment(extensions=["jinja2.ext.do"])
tmpl = env.from_string(
"""
{%- set items = [] %}
{%- for char in "foo" %}
{%- do items.append(loop.index0 ~ char) %}
{%- endfor %}{{ items|join(', ') }}"""
)
assert tmpl.render() == "0f, 1o, 2o"
def test_extension_nodes(self):
env = Environment(extensions=[ExampleExtension])
tmpl = env.from_string("{% test %}")
assert tmpl.render() == "False|42|23|{}|None"
def test_contextreference_node_passes_context(self):
env = Environment(extensions=[ExampleExtension])
tmpl = env.from_string('{% set test_var="test_content" %}{% test %}')
assert tmpl.render() == "False|42|23|{}|test_content"
def test_contextreference_node_can_pass_locals(self):
env = Environment(extensions=[DerivedExampleExtension])
tmpl = env.from_string(
'{% for test_var in ["test_content"] %}{% test %}{% endfor %}'
)
assert tmpl.render() == "False|42|23|{}|test_content"
def test_identifier(self):
assert ExampleExtension.identifier == __name__ + ".ExampleExtension"
def test_rebinding(self):
original = Environment(extensions=[ExampleExtension])
overlay = original.overlay()
for env in original, overlay:
for ext in env.extensions.values():
assert ext.environment is env
def test_preprocessor_extension(self):
env = Environment(extensions=[PreprocessorExtension])
tmpl = env.from_string("{[[TEST]]}")
assert tmpl.render(foo=42) == "{(42)}"
def test_streamfilter_extension(self):
env = Environment(extensions=[StreamFilterExtension])
env.globals["gettext"] = lambda x: x.upper()
tmpl = env.from_string("Foo _(bar) Baz")
out = tmpl.render()
assert out == "Foo BAR Baz"
def test_extension_ordering(self):
class T1(Extension):
priority = 1
class T2(Extension):
priority = 2
env = Environment(extensions=[T1, T2])
ext = list(env.iter_extensions())
assert ext[0].__class__ is T1
assert ext[1].__class__ is T2
def test_debug(self):
env = Environment(extensions=["jinja2.ext.debug"])
t = env.from_string("Hello\n{% debug %}\nGoodbye")
out = t.render()
for value in ("context", "cycler", "filters", "abs", "tests", "!="):
assert f"'{value}'" in out
class TestInternationalization:
def test_trans(self):
tmpl = i18n_env.get_template("child.html")
assert tmpl.render(LANGUAGE="de") == "<title>fehlend</title>pass auf"
def test_trans_plural(self):
tmpl = i18n_env.get_template("plural.html")
assert tmpl.render(LANGUAGE="de", user_count=1) == "Ein Benutzer online"
assert tmpl.render(LANGUAGE="de", user_count=2) == "2 Benutzer online"
def test_trans_plural_with_functions(self):
tmpl = i18n_env.get_template("plural2.html")
def get_user_count():
get_user_count.called += 1
return 1
get_user_count.called = 0
assert tmpl.render(LANGUAGE="de", get_user_count=get_user_count) == "1s"
assert get_user_count.called == 1
def test_complex_plural(self):
tmpl = i18n_env.from_string(
"{% trans foo=42, count=2 %}{{ count }} item{% "
"pluralize count %}{{ count }} items{% endtrans %}"
)
assert tmpl.render() == "2 items"
pytest.raises(
TemplateAssertionError,
i18n_env.from_string,
"{% trans foo %}...{% pluralize bar %}...{% endtrans %}",
)
def test_trans_stringformatting(self):
tmpl = i18n_env.get_template("stringformat.html")
assert tmpl.render(LANGUAGE="de", user_count=5) == "Benutzer: 5"
def test_trimmed(self):
tmpl = i18n_env.from_string(
"{%- trans trimmed %} hello\n world {% endtrans -%}"
)
assert tmpl.render() == "hello world"
def test_trimmed_policy(self):
s = "{%- trans %} hello\n world {% endtrans -%}"
tmpl = i18n_env.from_string(s)
trimmed_tmpl = i18n_env_trimmed.from_string(s)
assert tmpl.render() == " hello\n world "
assert trimmed_tmpl.render() == "hello world"
def test_trimmed_policy_override(self):
tmpl = i18n_env_trimmed.from_string(
"{%- trans notrimmed %} hello\n world {% endtrans -%}"
)
assert tmpl.render() == " hello\n world "
def test_trimmed_vars(self):
tmpl = i18n_env.from_string(
'{%- trans trimmed x="world" %} hello\n {{ x }} {% endtrans -%}'
)
assert tmpl.render() == "hello world"
def test_trimmed_varname_trimmed(self):
# unlikely variable name, but when used as a variable
# it should not enable trimming
tmpl = i18n_env.from_string(
"{%- trans trimmed = 'world' %} hello\n {{ trimmed }} {% endtrans -%}"
)
assert tmpl.render() == " hello\n world "
def test_extract(self):
from jinja2.ext import babel_extract
source = BytesIO(
b"""
{{ gettext('Hello World') }}
{% trans %}Hello World{% endtrans %}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
"""
)
assert list(babel_extract(source, ("gettext", "ngettext", "_"), [], {})) == [
(2, "gettext", "Hello World", []),
(3, "gettext", "Hello World", []),
(4, "ngettext", ("%(users)s user", "%(users)s users", None), []),
]
def test_extract_trimmed(self):
from jinja2.ext import babel_extract
source = BytesIO(
b"""
{{ gettext(' Hello \n World') }}
{% trans trimmed %} Hello \n World{% endtrans %}
{% trans trimmed %}{{ users }} \n user
{%- pluralize %}{{ users }} \n users{% endtrans %}
"""
)
assert list(babel_extract(source, ("gettext", "ngettext", "_"), [], {})) == [
(2, "gettext", " Hello \n World", []),
(4, "gettext", "Hello World", []),
(6, "ngettext", ("%(users)s user", "%(users)s users", None), []),
]
def test_extract_trimmed_option(self):
from jinja2.ext import babel_extract
source = BytesIO(
b"""
{{ gettext(' Hello \n World') }}
{% trans %} Hello \n World{% endtrans %}
{% trans %}{{ users }} \n user
{%- pluralize %}{{ users }} \n users{% endtrans %}
"""
)
opts = {"trimmed": "true"}
assert list(babel_extract(source, ("gettext", "ngettext", "_"), [], opts)) == [
(2, "gettext", " Hello \n World", []),
(4, "gettext", "Hello World", []),
(6, "ngettext", ("%(users)s user", "%(users)s users", None), []),
]
def test_comment_extract(self):
from jinja2.ext import babel_extract
source = BytesIO(
b"""
{# trans first #}
{{ gettext('Hello World') }}
{% trans %}Hello World{% endtrans %}{# trans second #}
{#: third #}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
"""
)
assert list(
babel_extract(source, ("gettext", "ngettext", "_"), ["trans", ":"], {})
) == [
(3, "gettext", "Hello World", ["first"]),
(4, "gettext", "Hello World", ["second"]),
(6, "ngettext", ("%(users)s user", "%(users)s users", None), ["third"]),
]
def test_extract_context(self):
from jinja2.ext import babel_extract
source = BytesIO(
b"""
{{ pgettext("babel", "Hello World") }}
{{ npgettext("babel", "%(users)s user", "%(users)s users", users) }}
"""
)
assert list(babel_extract(source, ("pgettext", "npgettext", "_"), [], {})) == [
(2, "pgettext", ("babel", "Hello World"), []),
(3, "npgettext", ("babel", "%(users)s user", "%(users)s users", None), []),
]
class TestScope:
def test_basic_scope_behavior(self):
# This is what the old with statement compiled down to
class ScopeExt(Extension):
tags = {"scope"}
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != "block_end":
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect("comma")
target = parser.parse_assign_target()
parser.stream.expect("assign")
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + list(
parser.parse_statements(("name:endscope",), drop_needle=True)
)
return node
env = Environment(extensions=[ScopeExt])
tmpl = env.from_string(
"""\
{%- scope a=1, b=2, c=b, d=e, e=5 -%}
{{ a }}|{{ b }}|{{ c }}|{{ d }}|{{ e }}
{%- endscope -%}
"""
)
assert tmpl.render(b=3, e=4) == "1|2|2|4|5"
class TestNewstyleInternationalization:
def test_trans(self):
tmpl = newstyle_i18n_env.get_template("child.html")
assert tmpl.render(LANGUAGE="de") == "<title>fehlend</title>pass auf"
def test_trans_plural(self):
tmpl = newstyle_i18n_env.get_template("plural.html")
assert tmpl.render(LANGUAGE="de", user_count=1) == "Ein Benutzer online"
assert tmpl.render(LANGUAGE="de", user_count=2) == "2 Benutzer online"
def test_complex_plural(self):
tmpl = newstyle_i18n_env.from_string(
"{% trans foo=42, count=2 %}{{ count }} item{% "
"pluralize count %}{{ count }} items{% endtrans %}"
)
assert tmpl.render() == "2 items"
pytest.raises(
TemplateAssertionError,
i18n_env.from_string,
"{% trans foo %}...{% pluralize bar %}...{% endtrans %}",
)
def test_trans_stringformatting(self):
tmpl = newstyle_i18n_env.get_template("stringformat.html")
assert tmpl.render(LANGUAGE="de", user_count=5) == "Benutzer: 5"
def test_newstyle_plural(self):
tmpl = newstyle_i18n_env.get_template("ngettext.html")
assert tmpl.render(LANGUAGE="de", apples=1) == "1 Apfel"
assert tmpl.render(LANGUAGE="de", apples=5) == "5 Äpfel"
def test_autoescape_support(self):
env = Environment(extensions=["jinja2.ext.i18n"])
env.install_gettext_callables(
lambda x: "<strong>Wert: %(name)s</strong>",
lambda s, p, n: s,
newstyle=True,
)
t = env.from_string(
'{% autoescape ae %}{{ gettext("foo", name='
'"<test>") }}{% endautoescape %}'
)
        assert t.render(ae=True) == "<strong>Wert: &lt;test&gt;</strong>"
assert t.render(ae=False) == "<strong>Wert: <test></strong>"
def test_autoescape_macros(self):
env = Environment(autoescape=False)
template = (
"{% macro m() %}<html>{% endmacro %}"
"{% autoescape true %}{{ m() }}{% endautoescape %}"
)
        assert env.from_string(template).render() == "&lt;html&gt;"
def test_num_used_twice(self):
tmpl = newstyle_i18n_env.get_template("ngettext_long.html")
assert tmpl.render(apples=5, LANGUAGE="de") == "5 Äpfel"
def test_num_called_num(self):
source = newstyle_i18n_env.compile(
"""
{% trans num=3 %}{{ num }} apple{% pluralize
%}{{ num }} apples{% endtrans %}
""",
raw=True,
)
# quite hacky, but the only way to properly test that. The idea is
# that the generated code does not pass num twice (although that
# would work) for better performance. This only works on the
# newstyle gettext of course
assert (
re.search(r"u?'%\(num\)s apple', u?'%\(num\)s apples', 3", source)
is not None
)
def test_trans_vars(self):
t1 = newstyle_i18n_env.get_template("transvars1.html")
t2 = newstyle_i18n_env.get_template("transvars2.html")
t3 = newstyle_i18n_env.get_template("transvars3.html")
assert t1.render(num=1, LANGUAGE="de") == "Benutzer: 1"
assert t2.render(count=23, LANGUAGE="de") == "Benutzer: 23"
assert t3.render(num=42, LANGUAGE="de") == "Benutzer: 42"
def test_novars_vars_escaping(self):
t = newstyle_i18n_env.get_template("novars.html")
assert t.render() == "%(hello)s"
t = newstyle_i18n_env.get_template("vars.html")
assert t.render(foo="42") == "42%(foo)s"
t = newstyle_i18n_env.get_template("explicitvars.html")
assert t.render() == "%(foo)s"
def test_context(self):
tmpl = newstyle_i18n_env.get_template("pgettext.html")
assert tmpl.render(LANGUAGE="de") == "Apple"
def test_context_plural(self):
tmpl = newstyle_i18n_env.get_template("npgettext.html")
assert tmpl.render(LANGUAGE="de", apples=1) == "1 Apple"
assert tmpl.render(LANGUAGE="de", apples=5) == "5 Apples"
def test_context_block(self):
tmpl = newstyle_i18n_env.get_template("pgettext_block")
assert tmpl.render(LANGUAGE="de") == "Apple"
def test_context_plural_block(self):
tmpl = newstyle_i18n_env.get_template("npgettext_block")
assert tmpl.render(LANGUAGE="de", apples=1) == "1 Apple"
assert tmpl.render(LANGUAGE="de", apples=5) == "5 Apples"
class TestAutoEscape:
def test_scoped_setting(self):
env = Environment(autoescape=True)
tmpl = env.from_string(
"""
{{ "<HelloWorld>" }}
{% autoescape false %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
"""
)
assert tmpl.render().split() == [
"<HelloWorld>",
"<HelloWorld>",
"<HelloWorld>",
]
env = Environment(autoescape=False)
tmpl = env.from_string(
"""
{{ "<HelloWorld>" }}
{% autoescape true %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
"""
)
assert tmpl.render().split() == [
"<HelloWorld>",
"<HelloWorld>",
"<HelloWorld>",
]
def test_nonvolatile(self):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}')
        assert tmpl.render() == ' foo="&lt;test&gt;"'
tmpl = env.from_string(
'{% autoescape false %}{{ {"foo": "<test>"}'
"|xmlattr|escape }}{% endautoescape %}"
)
assert tmpl.render() == " foo="&lt;test&gt;""
def test_volatile(self):
env = Environment(autoescape=True)
tmpl = env.from_string(
'{% autoescape foo %}{{ {"foo": "<test>"}'
"|xmlattr|escape }}{% endautoescape %}"
)
        assert tmpl.render(foo=False) == " foo=&#34;&lt;test&gt;&#34;"
        assert tmpl.render(foo=True) == ' foo="&lt;test&gt;"'
def test_scoping(self):
env = Environment()
tmpl = env.from_string(
'{% autoescape true %}{% set x = "<x>" %}{{ x }}'
'{% endautoescape %}{{ x }}{{ "<y>" }}'
)
        assert tmpl.render(x=1) == "&lt;x&gt;1<y>"
def test_volatile_scoping(self):
env = Environment()
tmplsource = """
{% autoescape val %}
{% macro foo(x) %}
[{{ x }}]
{% endmacro %}
{{ foo().__class__.__name__ }}
{% endautoescape %}
{{ '<testing>' }}
"""
tmpl = env.from_string(tmplsource)
assert tmpl.render(val=True).split()[0] == "Markup"
assert tmpl.render(val=False).split()[0] == "str"
# looking at the source we should see <testing> there in raw
# (and then escaped as well)
env = Environment()
pysource = env.compile(tmplsource, raw=True)
assert "<testing>\\n" in pysource
env = Environment(autoescape=True)
pysource = env.compile(tmplsource, raw=True)
assert "<testing>\\n" in pysource
def test_overlay_scopes(self):
class MagicScopeExtension(Extension):
tags = {"overlay"}
def parse(self, parser):
node = nodes.OverlayScope(lineno=next(parser.stream).lineno)
node.body = list(
parser.parse_statements(("name:endoverlay",), drop_needle=True)
)
node.context = self.call_method("get_scope")
return node
def get_scope(self):
return {"x": [1, 2, 3]}
env = Environment(extensions=[MagicScopeExtension])
tmpl = env.from_string(
"""
{{- x }}|{% set z = 99 %}
{%- overlay %}
{{- y }}|{{ z }}|{% for item in x %}[{{ item }}]{% endfor %}
{%- endoverlay %}|
{{- x -}}
"""
)
assert tmpl.render(x=42, y=23) == "42|23|99|[1][2][3]|42"
| 25,754 | 34.42641 | 87 | py |
jinja | jinja-main/tests/test_debug.py | import pickle
import re
from traceback import format_exception
import pytest
from jinja2 import ChoiceLoader
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import TemplateSyntaxError
@pytest.fixture
def fs_env(filesystem_loader):
"""returns a new environment."""
return Environment(loader=filesystem_loader)
class TestDebug:
def assert_traceback_matches(self, callback, expected_tb):
with pytest.raises(Exception) as exc_info:
callback()
tb = format_exception(exc_info.type, exc_info.value, exc_info.tb)
m = re.search(expected_tb.strip(), "".join(tb))
assert (
m is not None
), f"Traceback did not match:\n\n{''.join(tb)}\nexpected:\n{expected_tb}"
def test_runtime_error(self, fs_env):
def test():
tmpl.render(fail=lambda: 1 / 0)
tmpl = fs_env.get_template("broken.html")
self.assert_traceback_matches(
test,
r"""
File ".*?broken.html", line 2, in (top-level template code|<module>)
\{\{ fail\(\) \}\}(
\^{12})?
File ".*debug?.pyc?", line \d+, in <lambda>
tmpl\.render\(fail=lambda: 1 / 0\)(
~~\^~~)?
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
""",
)
def test_syntax_error(self, fs_env):
# The trailing .*? is for PyPy 2 and 3, which don't seem to
# clear the exception's original traceback, leaving the syntax
# error in the middle of other compiler frames.
self.assert_traceback_matches(
lambda: fs_env.get_template("syntaxerror.html"),
"""(?sm)
File ".*?syntaxerror.html", line 4, in (template|<module>)
\\{% endif %\\}.*?
(jinja2\\.exceptions\\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja \
was looking for the following tags: 'endfor' or 'else'. The innermost block that needs \
to be closed is 'for'.
""",
)
def test_regular_syntax_error(self, fs_env):
def test():
raise TemplateSyntaxError("wtf", 42)
self.assert_traceback_matches(
test,
r"""
File ".*debug.pyc?", line \d+, in test
raise TemplateSyntaxError\("wtf", 42\)(
\^{36})?
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
line 42""",
)
def test_pickleable_syntax_error(self, fs_env):
original = TemplateSyntaxError("bad template", 42, "test", "test.txt")
unpickled = pickle.loads(pickle.dumps(original))
assert str(original) == str(unpickled)
assert original.name == unpickled.name
def test_include_syntax_error_source(self, filesystem_loader):
e = Environment(
loader=ChoiceLoader(
[
filesystem_loader,
DictLoader({"inc": "a\n{% include 'syntaxerror.html' %}\nb"}),
]
)
)
t = e.get_template("inc")
with pytest.raises(TemplateSyntaxError) as exc_info:
t.render()
assert exc_info.value.source is not None
def test_local_extraction(self):
from jinja2.debug import get_template_locals
from jinja2.runtime import missing
locals = get_template_locals(
{
"l_0_foo": 42,
"l_1_foo": 23,
"l_2_foo": 13,
"l_0_bar": 99,
"l_1_bar": missing,
"l_0_baz": missing,
}
)
assert locals == {"foo": 13, "bar": 99}
def test_get_corresponding_lineno_traceback(self, fs_env):
tmpl = fs_env.get_template("test.html")
assert tmpl.get_corresponding_lineno(1) == 1
| 3,704 | 30.398305 | 88 | py |
jinja | jinja-main/tests/test_tests.py | import pytest
from markupsafe import Markup
from jinja2 import Environment
from jinja2 import TemplateAssertionError
from jinja2 import TemplateRuntimeError
class MyDict(dict):
pass
class TestTestsCase:
def test_defined(self, env):
tmpl = env.from_string("{{ missing is defined }}|{{ true is defined }}")
assert tmpl.render() == "False|True"
def test_even(self, env):
tmpl = env.from_string("""{{ 1 is even }}|{{ 2 is even }}""")
assert tmpl.render() == "False|True"
def test_odd(self, env):
tmpl = env.from_string("""{{ 1 is odd }}|{{ 2 is odd }}""")
assert tmpl.render() == "True|False"
def test_lower(self, env):
tmpl = env.from_string("""{{ "foo" is lower }}|{{ "FOO" is lower }}""")
assert tmpl.render() == "True|False"
# Test type checks
@pytest.mark.parametrize(
"op,expect",
(
("none is none", True),
("false is none", False),
("true is none", False),
("42 is none", False),
("none is true", False),
("false is true", False),
("true is true", True),
("0 is true", False),
("1 is true", False),
("42 is true", False),
("none is false", False),
("false is false", True),
("true is false", False),
("0 is false", False),
("1 is false", False),
("42 is false", False),
("none is boolean", False),
("false is boolean", True),
("true is boolean", True),
("0 is boolean", False),
("1 is boolean", False),
("42 is boolean", False),
("0.0 is boolean", False),
("1.0 is boolean", False),
("3.14159 is boolean", False),
("none is integer", False),
("false is integer", False),
("true is integer", False),
("42 is integer", True),
("3.14159 is integer", False),
("(10 ** 100) is integer", True),
("none is float", False),
("false is float", False),
("true is float", False),
("42 is float", False),
("4.2 is float", True),
("(10 ** 100) is float", False),
("none is number", False),
("false is number", True),
("true is number", True),
("42 is number", True),
("3.14159 is number", True),
("complex is number", True),
("(10 ** 100) is number", True),
("none is string", False),
("false is string", False),
("true is string", False),
("42 is string", False),
('"foo" is string', True),
("none is sequence", False),
("false is sequence", False),
("42 is sequence", False),
('"foo" is sequence', True),
("[] is sequence", True),
("[1, 2, 3] is sequence", True),
("{} is sequence", True),
("none is mapping", False),
("false is mapping", False),
("42 is mapping", False),
('"foo" is mapping', False),
("[] is mapping", False),
("{} is mapping", True),
("mydict is mapping", True),
("none is iterable", False),
("false is iterable", False),
("42 is iterable", False),
('"foo" is iterable', True),
("[] is iterable", True),
("{} is iterable", True),
("range(5) is iterable", True),
("none is callable", False),
("false is callable", False),
("42 is callable", False),
('"foo" is callable', False),
("[] is callable", False),
("{} is callable", False),
("range is callable", True),
),
)
def test_types(self, env, op, expect):
t = env.from_string(f"{{{{ {op} }}}}")
assert t.render(mydict=MyDict(), complex=complex(1, 2)) == str(expect)
def test_upper(self, env):
tmpl = env.from_string('{{ "FOO" is upper }}|{{ "foo" is upper }}')
assert tmpl.render() == "True|False"
def test_equalto(self, env):
tmpl = env.from_string(
"{{ foo is eq 12 }}|"
"{{ foo is eq 0 }}|"
"{{ foo is eq (3 * 4) }}|"
'{{ bar is eq "baz" }}|'
'{{ bar is eq "zab" }}|'
'{{ bar is eq ("ba" + "z") }}|'
"{{ bar is eq bar }}|"
"{{ bar is eq foo }}"
)
assert (
tmpl.render(foo=12, bar="baz")
== "True|False|True|True|False|True|True|False"
)
@pytest.mark.parametrize(
"op,expect",
(
("eq 2", True),
("eq 3", False),
("ne 3", True),
("ne 2", False),
("lt 3", True),
("lt 2", False),
("le 2", True),
("le 1", False),
("gt 1", True),
("gt 2", False),
("ge 2", True),
("ge 3", False),
),
)
def test_compare_aliases(self, env, op, expect):
t = env.from_string(f"{{{{ 2 is {op} }}}}")
assert t.render() == str(expect)
def test_sameas(self, env):
tmpl = env.from_string("{{ foo is sameas false }}|{{ 0 is sameas false }}")
assert tmpl.render(foo=False) == "True|False"
def test_no_paren_for_arg1(self, env):
tmpl = env.from_string("{{ foo is sameas none }}")
assert tmpl.render(foo=None) == "True"
def test_escaped(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string("{{ x is escaped }}|{{ y is escaped }}")
assert tmpl.render(x="foo", y=Markup("foo")) == "False|True"
def test_greaterthan(self, env):
tmpl = env.from_string("{{ 1 is greaterthan 0 }}|{{ 0 is greaterthan 1 }}")
assert tmpl.render() == "True|False"
def test_lessthan(self, env):
tmpl = env.from_string("{{ 0 is lessthan 1 }}|{{ 1 is lessthan 0 }}")
assert tmpl.render() == "True|False"
def test_multiple_tests(self):
items = []
def matching(x, y):
items.append((x, y))
return False
env = Environment()
env.tests["matching"] = matching
tmpl = env.from_string(
"{{ 'us-west-1' is matching '(us-east-1|ap-northeast-1)'"
" or 'stage' is matching '(dev|stage)' }}"
)
assert tmpl.render() == "False"
assert items == [
("us-west-1", "(us-east-1|ap-northeast-1)"),
("stage", "(dev|stage)"),
]
def test_in(self, env):
tmpl = env.from_string(
'{{ "o" is in "foo" }}|'
'{{ "foo" is in "foo" }}|'
'{{ "b" is in "foo" }}|'
"{{ 1 is in ((1, 2)) }}|"
"{{ 3 is in ((1, 2)) }}|"
"{{ 1 is in [1, 2] }}|"
"{{ 3 is in [1, 2] }}|"
'{{ "foo" is in {"foo": 1}}}|'
'{{ "baz" is in {"bar": 1}}}'
)
assert tmpl.render() == "True|True|False|True|False|True|False|True|False"
def test_name_undefined(env):
with pytest.raises(TemplateAssertionError, match="No test named 'f'"):
env.from_string("{{ x is f }}")
def test_name_undefined_in_if(env):
t = env.from_string("{% if x is defined %}{{ x is f }}{% endif %}")
assert t.render() == ""
with pytest.raises(TemplateRuntimeError, match="No test named 'f'"):
t.render(x=1)
def test_is_filter(env):
assert env.call_test("filter", "title")
assert not env.call_test("filter", "bad-name")
def test_is_test(env):
assert env.call_test("test", "number")
assert not env.call_test("test", "bad-name")
| 7,851 | 32.555556 | 83 | py |
jinja | jinja-main/tests/test_core_tags.py | import pytest
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import TemplateRuntimeError
from jinja2 import TemplateSyntaxError
from jinja2 import UndefinedError
@pytest.fixture
def env_trim():
return Environment(trim_blocks=True)
class TestForLoop:
def test_simple(self, env):
tmpl = env.from_string("{% for item in seq %}{{ item }}{% endfor %}")
assert tmpl.render(seq=list(range(10))) == "0123456789"
def test_else(self, env):
tmpl = env.from_string("{% for item in seq %}XXX{% else %}...{% endfor %}")
assert tmpl.render() == "..."
def test_else_scoping_item(self, env):
tmpl = env.from_string("{% for item in [] %}{% else %}{{ item }}{% endfor %}")
assert tmpl.render(item=42) == "42"
def test_empty_blocks(self, env):
tmpl = env.from_string("<{% for item in seq %}{% else %}{% endfor %}>")
assert tmpl.render() == "<>"
def test_context_vars(self, env):
slist = [42, 24]
for seq in [slist, iter(slist), reversed(slist), (_ for _ in slist)]:
tmpl = env.from_string(
"""{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}"""
)
one, two, _ = tmpl.render(seq=seq).split("###")
(
one_index,
one_index0,
one_revindex,
one_revindex0,
one_first,
one_last,
one_length,
) = one.split("|")
(
two_index,
two_index0,
two_revindex,
two_revindex0,
two_first,
two_last,
two_length,
) = two.split("|")
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == "True" and two_first == "False"
assert one_last == "False" and two_last == "True"
assert one_length == two_length == "2"
def test_cycling(self, env):
tmpl = env.from_string(
"""{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}"""
)
output = tmpl.render(seq=list(range(4)), through=("<1>", "<2>"))
assert output == "<1><2>" * 4
def test_lookaround(self, env):
tmpl = env.from_string(
"""{% for item in seq -%}
{{ loop.previtem|default('x') }}-{{ item }}-{{
loop.nextitem|default('x') }}|
{%- endfor %}"""
)
output = tmpl.render(seq=list(range(4)))
assert output == "x-0-1|0-1-2|1-2-3|2-3-x|"
def test_changed(self, env):
tmpl = env.from_string(
"""{% for item in seq -%}
{{ loop.changed(item) }},
{%- endfor %}"""
)
output = tmpl.render(seq=[None, None, 1, 2, 2, 3, 4, 4, 4])
assert output == "True,False,True,True,False,True,True,False,False,"
def test_scope(self, env):
tmpl = env.from_string("{% for item in seq %}{% endfor %}{{ item }}")
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self, env):
tmpl = env.from_string("{% for item in iter %}{{ item }}{% endfor %}")
output = tmpl.render(iter=range(5))
assert output == "01234"
def test_noniter(self, env):
tmpl = env.from_string("{% for item in none %}...{% endfor %}")
pytest.raises(TypeError, tmpl.render)
def test_recursive(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[1<[1][2]>][2<[1][2]>][3<[a]>]"
)
def test_recursive_lookaround(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ loop.previtem.a if loop.previtem is defined else 'x' }}.{{
item.a }}.{{ loop.nextitem.a if loop.nextitem is defined else 'x'
}}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[x.1.2<[x.1.2][1.2.x]>][1.2.3<[x.1.2][1.2.x]>][2.3.x<[x.a.x]>]"
)
def test_recursive_depth0(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]"
)
def test_recursive_depth(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]"
)
def test_looploop(self, env):
tmpl = env.from_string(
"""{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}"""
)
assert tmpl.render(table=["ab", "cd"]) == "[1|1][1|2][2|1][2|2]"
def test_reversed_bug(self, env):
tmpl = env.from_string(
"{% for i in items %}{{ i }}"
"{% if not loop.last %}"
",{% endif %}{% endfor %}"
)
assert tmpl.render(items=reversed([3, 2, 1])) == "1,2,3"
def test_loop_errors(self, env):
tmpl = env.from_string(
"""{% for item in [1] if loop.index
== 0 %}...{% endfor %}"""
)
pytest.raises(UndefinedError, tmpl.render)
tmpl = env.from_string(
"""{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}"""
)
assert tmpl.render() == ""
def test_loop_filter(self, env):
tmpl = env.from_string(
"{% for item in range(10) if item is even %}[{{ item }}]{% endfor %}"
)
assert tmpl.render() == "[0][2][4][6][8]"
tmpl = env.from_string(
"""
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}"""
)
assert tmpl.render() == "[1:0][2:2][3:4][4:6][5:8]"
def test_loop_unassignable(self, env):
pytest.raises(
TemplateSyntaxError, env.from_string, "{% for loop in seq %}...{% endfor %}"
)
def test_scoped_special_var(self, env):
t = env.from_string(
"{% for s in seq %}[{{ loop.first }}{% for c in s %}"
"|{{ loop.first }}{% endfor %}]{% endfor %}"
)
assert t.render(seq=("ab", "cd")) == "[True|True|False][False|True|False]"
def test_scoped_loop_var(self, env):
t = env.from_string(
"{% for x in seq %}{{ loop.first }}"
"{% for y in seq %}{% endfor %}{% endfor %}"
)
assert t.render(seq="ab") == "TrueFalse"
t = env.from_string(
"{% for x in seq %}{% for y in seq %}"
"{{ loop.first }}{% endfor %}{% endfor %}"
)
assert t.render(seq="ab") == "TrueFalseTrueFalse"
def test_recursive_empty_loop_iter(self, env):
t = env.from_string(
"""
{%- for item in foo recursive -%}{%- endfor -%}
"""
)
assert t.render(dict(foo=[])) == ""
def test_call_in_loop(self, env):
t = env.from_string(
"""
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
"""
)
assert t.render() == "[1][2][3]"
def test_scoping_bug(self, env):
t = env.from_string(
"""
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
"""
)
assert t.render(foo=(1,)) == "...1......2..."
def test_unpacking(self, env):
tmpl = env.from_string(
"{% for a, b, c in [[1, 2, 3]] %}{{ a }}|{{ b }}|{{ c }}{% endfor %}"
)
assert tmpl.render() == "1|2|3"
def test_intended_scoping_with_set(self, env):
tmpl = env.from_string(
"{% for item in seq %}{{ x }}{% set x = item %}{{ x }}{% endfor %}"
)
assert tmpl.render(x=0, seq=[1, 2, 3]) == "010203"
tmpl = env.from_string(
"{% set x = 9 %}{% for item in seq %}{{ x }}"
"{% set x = item %}{{ x }}{% endfor %}"
)
assert tmpl.render(x=0, seq=[1, 2, 3]) == "919293"
class TestIfCondition:
def test_simple(self, env):
tmpl = env.from_string("""{% if true %}...{% endif %}""")
assert tmpl.render() == "..."
def test_elif(self, env):
tmpl = env.from_string(
"""{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}"""
)
assert tmpl.render() == "..."
def test_elif_deep(self, env):
elifs = "\n".join(f"{{% elif a == {i} %}}{i}" for i in range(1, 1000))
tmpl = env.from_string(f"{{% if a == 0 %}}0{elifs}{{% else %}}x{{% endif %}}")
for x in (0, 10, 999):
assert tmpl.render(a=x).strip() == str(x)
assert tmpl.render(a=1000).strip() == "x"
def test_else(self, env):
tmpl = env.from_string("{% if false %}XXX{% else %}...{% endif %}")
assert tmpl.render() == "..."
def test_empty(self, env):
tmpl = env.from_string("[{% if true %}{% else %}{% endif %}]")
assert tmpl.render() == "[]"
def test_complete(self, env):
tmpl = env.from_string(
"{% if a %}A{% elif b %}B{% elif c == d %}C{% else %}D{% endif %}"
)
assert tmpl.render(a=0, b=False, c=42, d=42.0) == "C"
def test_no_scope(self, env):
tmpl = env.from_string("{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}")
assert tmpl.render(a=True) == "1"
tmpl = env.from_string("{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}")
assert tmpl.render() == "1"
class TestMacros:
def test_simple(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}"""
)
assert tmpl.render() == "Hello Peter!"
def test_scoping(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}"""
)
assert tmpl.render() == "foo|bar"
def test_arguments(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}"""
)
assert tmpl.render() == "||c|d|a||c|d|a|b|c|d|1|2|3|d"
def test_arguments_defaults_nonsense(self, env_trim):
pytest.raises(
TemplateSyntaxError,
env_trim.from_string,
"""\
{% macro m(a, b=1, c) %}a={{ a }}, b={{ b }}, c={{ c }}{% endmacro %}""",
)
def test_caller_defaults_nonsense(self, env_trim):
pytest.raises(
TemplateSyntaxError,
env_trim.from_string,
"""\
{% macro a() %}{{ caller() }}{% endmacro %}
{% call(x, y=1, z) a() %}{% endcall %}""",
)
def test_varargs(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}"""
)
assert tmpl.render() == "1|2|3"
def test_simple_call(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}"""
)
assert tmpl.render() == "[[data]]"
def test_complex_call(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}"""
)
assert tmpl.render() == "[[data]]"
def test_caller_undefined(self, env_trim):
tmpl = env_trim.from_string(
"""\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}"""
)
assert tmpl.render() == "True"
def test_include(self, env_trim):
env_trim = Environment(
loader=DictLoader(
{"include": "{% macro test(foo) %}[{{ foo }}]{% endmacro %}"}
)
)
tmpl = env_trim.from_string('{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == "[foo]"
def test_macro_api(self, env_trim):
tmpl = env_trim.from_string(
"{% macro foo(a, b) %}{% endmacro %}"
"{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}"
"{% macro baz() %}{{ caller() }}{% endmacro %}"
)
assert tmpl.module.foo.arguments == ("a", "b")
assert tmpl.module.foo.name == "foo"
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self, env_trim):
tmpl = env_trim.from_string(
"{% macro foo(x) %}{{ x }}{% if x > 1 %}|"
"{{ foo(x - 1) }}{% endif %}{% endmacro %}"
"{{ foo(5) }}"
)
assert tmpl.render() == "5|4|3|2|1"
def test_macro_defaults_self_ref(self, env):
tmpl = env.from_string(
"""
{%- set x = 42 %}
{%- macro m(a, b=x, x=23) %}{{ a }}|{{ b }}|{{ x }}{% endmacro -%}
"""
)
assert tmpl.module.m(1) == "1||23"
assert tmpl.module.m(1, 2) == "1|2|23"
assert tmpl.module.m(1, 2, 3) == "1|2|3"
assert tmpl.module.m(1, x=7) == "1|7|7"
class TestSet:
def test_normal(self, env_trim):
tmpl = env_trim.from_string("{% set foo = 1 %}{{ foo }}")
assert tmpl.render() == "1"
assert tmpl.module.foo == 1
def test_block(self, env_trim):
tmpl = env_trim.from_string("{% set foo %}42{% endset %}{{ foo }}")
assert tmpl.render() == "42"
assert tmpl.module.foo == "42"
def test_block_escaping(self):
env = Environment(autoescape=True)
tmpl = env.from_string(
"{% set foo %}<em>{{ test }}</em>{% endset %}foo: {{ foo }}"
)
assert tmpl.render(test="<unsafe>") == "foo: <em><unsafe></em>"
def test_set_invalid(self, env_trim):
pytest.raises(
TemplateSyntaxError, env_trim.from_string, "{% set foo['bar'] = 1 %}"
)
tmpl = env_trim.from_string("{% set foo.bar = 1 %}")
exc_info = pytest.raises(TemplateRuntimeError, tmpl.render, foo={})
assert "non-namespace object" in exc_info.value.message
def test_namespace_redefined(self, env_trim):
tmpl = env_trim.from_string("{% set ns = namespace() %}{% set ns.bar = 'hi' %}")
exc_info = pytest.raises(TemplateRuntimeError, tmpl.render, namespace=dict)
assert "non-namespace object" in exc_info.value.message
def test_namespace(self, env_trim):
tmpl = env_trim.from_string(
"{% set ns = namespace() %}{% set ns.bar = '42' %}{{ ns.bar }}"
)
assert tmpl.render() == "42"
def test_namespace_block(self, env_trim):
tmpl = env_trim.from_string(
"{% set ns = namespace() %}{% set ns.bar %}42{% endset %}{{ ns.bar }}"
)
assert tmpl.render() == "42"
def test_init_namespace(self, env_trim):
tmpl = env_trim.from_string(
"{% set ns = namespace(d, self=37) %}"
"{% set ns.b = 42 %}"
"{{ ns.a }}|{{ ns.self }}|{{ ns.b }}"
)
assert tmpl.render(d={"a": 13}) == "13|37|42"
def test_namespace_loop(self, env_trim):
tmpl = env_trim.from_string(
"{% set ns = namespace(found=false) %}"
"{% for x in range(4) %}"
"{% if x == v %}"
"{% set ns.found = true %}"
"{% endif %}"
"{% endfor %}"
"{{ ns.found }}"
)
assert tmpl.render(v=3) == "True"
assert tmpl.render(v=4) == "False"
def test_namespace_macro(self, env_trim):
tmpl = env_trim.from_string(
"{% set ns = namespace() %}"
"{% set ns.a = 13 %}"
"{% macro magic(x) %}"
"{% set x.b = 37 %}"
"{% endmacro %}"
"{{ magic(ns) }}"
"{{ ns.a }}|{{ ns.b }}"
)
assert tmpl.render() == "13|37"
def test_block_escaping_filtered(self):
env = Environment(autoescape=True)
tmpl = env.from_string(
"{% set foo | trim %}<em>{{ test }}</em> {% endset %}foo: {{ foo }}"
)
assert tmpl.render(test="<unsafe>") == "foo: <em><unsafe></em>"
def test_block_filtered(self, env_trim):
tmpl = env_trim.from_string(
"{% set foo | trim | length | string %} 42 {% endset %}{{ foo }}"
)
assert tmpl.render() == "2"
assert tmpl.module.foo == "2"
def test_block_filtered_set(self, env_trim):
def _myfilter(val, arg):
assert arg == " xxx "
return val
env_trim.filters["myfilter"] = _myfilter
tmpl = env_trim.from_string(
'{% set a = " xxx " %}'
"{% set foo | myfilter(a) | trim | length | string %}"
' {% set b = " yy " %} 42 {{ a }}{{ b }} '
"{% endset %}"
"{{ foo }}"
)
assert tmpl.render() == "11"
assert tmpl.module.foo == "11"
class TestWith:
def test_with(self, env):
tmpl = env.from_string(
"""\
{% with a=42, b=23 -%}
{{ a }} = {{ b }}
{% endwith -%}
{{ a }} = {{ b }}\
"""
)
assert [x.strip() for x in tmpl.render(a=1, b=2).splitlines()] == [
"42 = 23",
"1 = 2",
]
def test_with_argument_scoping(self, env):
tmpl = env.from_string(
"""\
{%- with a=1, b=2, c=b, d=e, e=5 -%}
{{ a }}|{{ b }}|{{ c }}|{{ d }}|{{ e }}
{%- endwith -%}
"""
)
assert tmpl.render(b=3, e=4) == "1|2|3|4|5"
| 20,316 | 33.088926 | 88 | py |
jinja | jinja-main/tests/test_nodes.py | def test_template_hash(env):
template = env.parse("hash test")
hash(template)
| 86 | 20.75 | 37 | py |
jinja | jinja-main/tests/test_utils.py | import pickle
import random
from collections import deque
from copy import copy as shallow_copy
import pytest
from markupsafe import Markup
from jinja2.utils import consume
from jinja2.utils import generate_lorem_ipsum
from jinja2.utils import LRUCache
from jinja2.utils import missing
from jinja2.utils import object_type_repr
from jinja2.utils import select_autoescape
from jinja2.utils import urlize
class TestLRUCache:
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert d.keys() == ["d", "a", "c"]
def test_values(self):
cache = LRUCache(3)
cache["b"] = 1
cache["a"] = 2
assert cache.values() == [2, 1]
def test_values_empty(self):
cache = LRUCache(2)
assert cache.values() == []
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
@pytest.mark.parametrize("copy_func", [LRUCache.copy, shallow_copy])
def test_copy(self, copy_func):
cache = LRUCache(2)
cache["a"] = 1
cache["b"] = 2
copy = copy_func(cache)
assert copy._queue == cache._queue
copy["c"] = 3
assert copy._queue != cache._queue
assert copy.keys() == ["c", "b"]
def test_clear(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d.clear()
assert d.__getstate__() == {"capacity": 3, "_mapping": {}, "_queue": deque([])}
def test_repr(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
# Sort the strings - mapping is unordered
assert sorted(repr(d)) == sorted("<LRUCache {'a': 1, 'b': 2, 'c': 3}>")
def test_items(self):
"""Test various items, keys, values and iterators of LRUCache."""
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
assert d.items() == [("c", 3), ("b", 2), ("a", 1)]
assert d.keys() == ["c", "b", "a"]
assert d.values() == [3, 2, 1]
assert list(reversed(d)) == ["a", "b", "c"]
# Change the cache a little
d["b"]
d["a"] = 4
assert d.items() == [("a", 4), ("b", 2), ("c", 3)]
assert d.keys() == ["a", "b", "c"]
assert d.values() == [4, 2, 3]
assert list(reversed(d)) == ["c", "b", "a"]
def test_setdefault(self):
d = LRUCache(3)
assert len(d) == 0
assert d.setdefault("a") is None
assert d.setdefault("a", 1) is None
assert len(d) == 1
assert d.setdefault("b", 2) == 2
assert len(d) == 2
class TestHelpers:
def test_object_type_repr(self):
class X:
pass
assert object_type_repr(42) == "int object"
assert object_type_repr([]) == "list object"
assert object_type_repr(X()) == "test_utils.X object"
assert object_type_repr(None) == "None"
assert object_type_repr(Ellipsis) == "Ellipsis"
def test_autoescape_select(self):
func = select_autoescape(
enabled_extensions=("html", ".htm"),
disabled_extensions=("txt",),
default_for_string="STRING",
default="NONE",
)
assert func(None) == "STRING"
assert func("unknown.foo") == "NONE"
assert func("foo.html")
assert func("foo.htm")
assert not func("foo.txt")
assert func("FOO.HTML")
assert not func("FOO.TXT")
class TestEscapeUrlizeTarget:
def test_escape_urlize_target(self):
url = "http://example.org"
target = "<script>"
assert urlize(url, target=target) == (
'<a href="http://example.org"'
' target="<script>">'
"http://example.org</a>"
)
class TestLoremIpsum:
def test_lorem_ipsum_markup(self):
"""Test that output of lorem_ipsum is Markup by default."""
assert isinstance(generate_lorem_ipsum(), Markup)
def test_lorem_ipsum_html(self):
"""Test that output of lorem_ipsum is a string_type when not html."""
assert isinstance(generate_lorem_ipsum(html=False), str)
def test_lorem_ipsum_n(self):
"""Test that the n (number of lines) works as expected."""
assert generate_lorem_ipsum(n=0, html=False) == ""
for n in range(1, 50):
assert generate_lorem_ipsum(n=n, html=False).count("\n") == (n - 1) * 2
def test_lorem_ipsum_min(self):
"""Test that at least min words are in the output of each line"""
for _ in range(5):
m = random.randrange(20, 99)
for _ in range(10):
assert generate_lorem_ipsum(n=1, min=m, html=False).count(" ") >= m - 1
def test_lorem_ipsum_max(self):
"""Test that at least max words are in the output of each line"""
for _ in range(5):
m = random.randrange(21, 100)
for _ in range(10):
assert generate_lorem_ipsum(n=1, max=m, html=False).count(" ") < m - 1
def test_missing():
"""Test the repr of missing."""
assert repr(missing) == "missing"
def test_consume():
"""Test that consume consumes an iterator."""
x = iter([1, 2, 3, 4, 5])
consume(x)
with pytest.raises(StopIteration):
next(x)
| 5,631 | 29.27957 | 87 | py |
jinja | jinja-main/tests/res/__init__.py | 0 | 0 | 0 | py | |
jinja | jinja-main/docs/conf.py | from pallets_sphinx_themes import get_version
from pallets_sphinx_themes import ProjectLink
# Project --------------------------------------------------------------
project = "Jinja"
copyright = "2007 Pallets"
author = "Pallets"
release, version = get_version("Jinja2")
# General --------------------------------------------------------------
master_doc = "index"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"pallets_sphinx_themes",
"sphinxcontrib.log_cabinet",
"sphinx_issues",
]
autodoc_typehints = "description"
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
issues_github_path = "pallets/jinja"
# HTML -----------------------------------------------------------------
html_theme = "jinja"
html_theme_options = {"index_sidebar_logo": False}
html_context = {
"project_links": [
ProjectLink("Donate", "https://palletsprojects.com/donate"),
ProjectLink("PyPI Releases", "https://pypi.org/project/Jinja2/"),
ProjectLink("Source Code", "https://github.com/pallets/jinja/"),
ProjectLink("Issue Tracker", "https://github.com/pallets/jinja/issues/"),
ProjectLink("Chat", "https://discord.gg/pallets"),
]
}
html_sidebars = {
"index": ["project.html", "localtoc.html", "searchbox.html", "ethicalads.html"],
"**": ["localtoc.html", "relations.html", "searchbox.html", "ethicalads.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html", "ethicalads.html"]}
html_static_path = ["_static"]
html_favicon = "_static/jinja-logo-sidebar.png"
html_logo = "_static/jinja-logo-sidebar.png"
html_title = f"Jinja Documentation ({version})"
html_show_sourcelink = False
# LaTeX ----------------------------------------------------------------
latex_documents = [(master_doc, f"Jinja-{version}.tex", html_title, author, "manual")]
| 1,856 | 34.711538 | 86 | py |
jinja | jinja-main/docs/examples/cache_extension.py | from jinja2 import nodes
from jinja2.ext import Extension
class FragmentCacheExtension(Extension):
# a set of names that trigger the extension.
tags = {"cache"}
def __init__(self, environment):
super().__init__(environment)
# add the defaults to the environment
environment.extend(fragment_cache_prefix="", fragment_cache=None)
def parse(self, parser):
# the first token is the token that started the tag. In our case
# we only listen to ``'cache'`` so this will be a name token with
# `cache` as value. We get the line number so that we can give
# that line number to the nodes we create by hand.
lineno = next(parser.stream).lineno
# now we parse a single expression that is used as cache key.
args = [parser.parse_expression()]
# if there is a comma, the user provided a timeout. If not use
# None as second parameter.
if parser.stream.skip_if("comma"):
args.append(parser.parse_expression())
else:
args.append(nodes.Const(None))
# now we parse the body of the cache block up to `endcache` and
# drop the needle (which would always be `endcache` in that case)
body = parser.parse_statements(["name:endcache"], drop_needle=True)
# now return a `CallBlock` node that calls our _cache_support
# helper method on this extension.
return nodes.CallBlock(
self.call_method("_cache_support", args), [], [], body
).set_lineno(lineno)
def _cache_support(self, name, timeout, caller):
"""Helper callback."""
key = self.environment.fragment_cache_prefix + name
# try to load the block from the cache
# if there is no fragment in the cache, render it and store
# it in the cache.
rv = self.environment.fragment_cache.get(key)
if rv is not None:
return rv
rv = caller()
self.environment.fragment_cache.add(key, rv, timeout)
return rv
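# Usage sketch (assumption: any cache object exposing .get(key) and
# .add(key, value, timeout), e.g. a werkzeug/cachelib SimpleCache, works):
#
#   env = Environment(extensions=[FragmentCacheExtension])
#   env.fragment_cache = SimpleCache()
#   tmpl = env.from_string(
#       "{% cache 'sidebar', 300 %}...expensive block...{% endcache %}")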
| 2,053 | 36.345455 | 75 | py |
jinja | jinja-main/docs/examples/inline_gettext_extension.py | import re
from jinja2.exceptions import TemplateSyntaxError
from jinja2.ext import Extension
from jinja2.lexer import count_newlines
from jinja2.lexer import Token
_outside_re = re.compile(r"\\?(gettext|_)\(")
_inside_re = re.compile(r"\\?[()]")
class InlineGettext(Extension):
"""This extension implements support for inline gettext blocks::
<h1>_(Welcome)</h1>
<p>_(This is a paragraph)</p>
Requires the i18n extension to be loaded and configured.
"""
def filter_stream(self, stream):
paren_stack = 0
for token in stream:
if token.type != "data":
yield token
continue
pos = 0
lineno = token.lineno
while True:
if not paren_stack:
match = _outside_re.search(token.value, pos)
else:
match = _inside_re.search(token.value, pos)
if match is None:
break
new_pos = match.start()
if new_pos > pos:
preval = token.value[pos:new_pos]
yield Token(lineno, "data", preval)
lineno += count_newlines(preval)
gtok = match.group()
if gtok[0] == "\\":
yield Token(lineno, "data", gtok[1:])
elif not paren_stack:
yield Token(lineno, "block_begin", None)
yield Token(lineno, "name", "trans")
yield Token(lineno, "block_end", None)
paren_stack = 1
else:
if gtok == "(" or paren_stack > 1:
yield Token(lineno, "data", gtok)
paren_stack += -1 if gtok == ")" else 1
if not paren_stack:
yield Token(lineno, "block_begin", None)
yield Token(lineno, "name", "endtrans")
yield Token(lineno, "block_end", None)
pos = match.end()
if pos < len(token.value):
yield Token(lineno, "data", token.value[pos:])
if paren_stack:
raise TemplateSyntaxError(
"unclosed gettext expression",
token.lineno,
stream.name,
stream.filename,
)
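# Usage sketch (requires the i18n extension to be loaded and configured, as
# the class docstring notes; install_null_translations gives a no-op setup):
#
#   env = Environment(extensions=["jinja2.ext.i18n", InlineGettext])
#   env.install_null_translations()
#   print(env.from_string("<h1>_(Hello World)</h1>").render())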
| 2,398 | 31.863014 | 68 | py |
HyperIMBA | HyperIMBA-main/main.py | import argparse
import torch
import dataloader as dl
import torch.nn.functional as F
import numpy as np
from models import GatHyper, SageHyper, GcnHyper
import test as tt
def main(args):
if args.dataset == 'all':
ds_names = ['Cora','Citeseer','Photo','Actor','chameleon','Squirrel']
else:
ds_names = [args.dataset]
if args.backbone in ['all','Gcn','Gat','Sage']:
if args.backbone == 'all':
backbones = [b+'Hyper' for b in ['Gcn','Gat','Sage']]
else:
backbones = [args.backbone+'Hyper']
else:
return
for ds in ds_names:
for babo in backbones:
babotrain_acc={babo:[i for i in range(args.run_times)]}
babovalid_acc={babo:[i for i in range(args.run_times)]}
babotest_acc={babo:[i for i in range(args.run_times)]}
babowf1={babo:[i for i in range(args.run_times)]}
f2=open('results/'+ds+babo+'_scores.txt', 'w+')
f2.write('{0:7} {1:7}\n'.format(ds,babo))
f2.write('{0:7} {1:7} {2:7} {3:7} {4:7}\n'.format('run','train','valid','m-f1','w-f1'))
f2.flush()
for run in range(args.run_times):
dataset,data,train_mask,val_mask,test_mask = dl.select_dataset(ds, args.split)
model,data = globals()[babo].call(data,dataset.name,data.x.size(1),dataset.num_classes,args.hid_dim)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
best_val_acc = test_acc = 0.0
best_val_loss = np.inf
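                # train for up to args.epoch epochs, early-stopping on validation accuracy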
for epoch in range(1, args.epoch+1):
model.train()
optimizer.zero_grad()
F.nll_loss(model(data,args.loss_hp)[train_mask], data.y[train_mask]).backward()
optimizer.step()
train_acc,val_acc,tmp_test_acc,val_loss,tmp_w_f1 = tt.test(model, data, train_mask, val_mask, test_mask, args.loss_hp)
#print("acc:", train_acc,val_acc,tmp_test_acc,val_loss.item(),tmp_w_f1)
if val_acc>=best_val_acc:
train_re=train_acc
best_val_acc=val_acc
test_acc=tmp_test_acc
w_f1 = tmp_w_f1
best_val_loss=val_loss
wait_step=0
else:
wait_step += 1
if wait_step == args.stop_step:
#print('Early stop! Validate-- Min loss: ', best_val_loss, ', Max f1-score: ', best_val_acc)
break
del model
del data
babotrain_acc[babo][run]=train_re
babovalid_acc[babo][run]=best_val_acc
babotest_acc[babo][run]=test_acc
babowf1[babo][run]=w_f1
                log = 'Max epochs: ' + str(args.epoch) + ', dataset name: ' + ds + ', Backbone: ' + babo + ', Test: {0:.4f} {1:.4f}\n'
                print(log.format(babotest_acc[babo][run], babowf1[babo][run]))
f2.write('{0:4d} {1:4f} {2:4f} {3:4f} {4:4f}\n'.format(run,babotrain_acc[babo][run],babovalid_acc[babo][run],babotest_acc[babo][run],babowf1[babo][run]))
f2.flush()
f2.write('{0:4} {1:4f} {2:4f} {3:4f} {4:4f}\n'.format('std',np.std(babotrain_acc[babo]),np.std(babovalid_acc[babo]),np.std(babotest_acc[babo]),np.std(babowf1[babo])))
f2.write('{0:4} {1:4f} {2:4f} {3:4f} {4:4f}\n'.format('mean',np.mean(babotrain_acc[babo]),np.mean(babovalid_acc[babo]),np.mean(babotest_acc[babo]),np.mean(babowf1[babo])))
f2.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperbolic Geometric Hierarchy-IMBAlance Learning')
parser.add_argument("--dataset", '-d', type=str, default="Cora", help="all,Cora,Citeseer,Photo,Actor,chameleon,Squirrel")
parser.add_argument("--backbone", '-b', type=str, default="Gcn", help="all,Gcn,Gat,Sage")
parser.add_argument("--split", '-s', type=str, default=0,
help="Way of train-set split: 0~5(random,(0.5,1),(0,0.05),(0.66,1),(0.33,0.66),(0,0.33))")
parser.add_argument("--gpu", type=int, default=-1, help="GPU index. Default: -1, using CPU.")
parser.add_argument("--hid_dim", type=int, default=256, help="Hidden layer dimension")
parser.add_argument("--num_layers", type=int, default=2, help="Number of layers")
parser.add_argument("--epoch", type=int, default=200, help="Number of epochs. Default: 200")
parser.add_argument("--run_times", type=int, default=10, help="Run times")
parser.add_argument("--lr", type=float, default=0.01, help="Learning rate. Default: 0.01")
parser.add_argument("--weight_decay", type=float, default=0.0005, help="Weight decay. Default: 0.0005")
parser.add_argument("--loss_hp", type=float, default=1, help="Loss hyper-parameters (alpha). Default: 1")
#parser.add_argument('--early_stop', action='store_true', default=True, help="Indicates whether to use early stop")
    parser.add_argument('--stop_step', type=int, default=100, help="Step of early stop")
args = parser.parse_args()
print(args)
main(args)
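# Example invocation (sketch; flags as defined by the parser above):
#   python main.py --dataset Cora --backbone Gcn --split 0 --loss_hp 1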
| 5,254 | 56.119565 | 183 | py |
HyperIMBA | HyperIMBA-main/test.py | import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
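# Usage sketch:
#   train_f1, val_f1, test_f1, val_loss, test_wf1 = test(
#       model, data, train_mask, val_mask, test_mask, alpha)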
def test(model, data, train_mask, val_mask, test_mask, alpha):
with torch.no_grad():
model.eval()
logits, accs = model(data, alpha), []
for mask in [train_mask,val_mask,test_mask]:
pred = logits[mask].max(1)[1]
            # sklearn's f1_score expects (y_true, y_pred); micro-F1 equals accuracy here
            acc = f1_score(data.y[mask].cpu(), pred.cpu(), average='micro')
accs.append(acc)
        # reuse the logits from the single forward pass above for the validation loss
        accs.append(F.nll_loss(logits[val_mask], data.y[val_mask]))
        # `mask` and `pred` still hold the last loop iteration's values (the test set)
        accs.append(f1_score(data.y[mask].cpu(), pred.cpu(), average='weighted'))
return accs | 617 | 37.625 | 81 | py |
HyperIMBA | HyperIMBA-main/dataloader.py | import torch_geometric.datasets as dt
import torch_geometric.transforms as T
import torch
import numpy as np
from dgl.data.utils import generate_mask_tensor, idx2mask
from sklearn.model_selection import train_test_split
def select_dataset(ds,special):
if ds=='Cora' or ds=='Citeseer':
ds_loader='Planetoid'
elif ds=='Photo':
ds_loader='Amazon'
elif ds == 'chameleon' or ds == 'Squirrel':
ds_loader='WikipediaNetwork'
else:
ds_loader=ds
    dataset=load_datas(ds_loader,ds,special)
if ds == 'Actor':
data=dataset.data
dataset.name = ds
else:
data=dataset[0]
train_mask=data.train_mask
val_mask=data.val_mask
test_mask=data.test_mask
return dataset,data,train_mask,val_mask,test_mask
def load_datas(ds_loader,ds,special):
if ds_loader=='Planetoid':
dataset = dt.Planetoid(root='data/'+ds, name=ds, transform=T.NormalizeFeatures())
else:
dataset = getattr(dt, ds_loader)('data/'+ds,ds)
if ds_loader == 'Actor':
dataset.name = ds
    data = get_split(dataset, special)
dataset.data = data
return dataset
def get_split(dataset, special):
data = dataset.data
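    # Poincare embeddings precomputed by calculator.py; nodes are ranked by
    # embedding norm to build the hierarchy-based splits below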
values=np.load('hyperemb/'+dataset.name+'_values.npy')
    _, indices = torch.sort(torch.norm(torch.tensor(values),dim=1),descending=True)
#train set split ratio 1:1:8
    if special == 1:#Top 50% in the Poincare weight
train_idx, val_idx, test_idx = split_idx1(indices[:data.num_nodes//2],indices[data.num_nodes//2:], 0.2, 0.1, 42)
    elif special == 2:#Bottom 50%
train_idx, val_idx, test_idx = split_idx1(indices[data.num_nodes//2:],indices[:data.num_nodes//2], 0.2, 0.1, 42)
    elif special == 3:#Top 33%
train_idx, val_idx, test_idx = split_idx1(indices[:data.num_nodes//3],indices[data.num_nodes//3:], 0.3, 0.1, 42)
    elif special == 4:#Middle 33%
remaining = torch.cat((indices[:data.num_nodes//3],indices[data.num_nodes//3+data.num_nodes//3:]))
train_idx, val_idx, test_idx = split_idx1(indices[data.num_nodes//3:data.num_nodes//3+data.num_nodes//3],remaining, 0.3, 0.1, 42)
    elif special == 5:#Bottom 33%
train_idx, val_idx, test_idx = split_idx1(indices[data.num_nodes//3+data.num_nodes//3:],indices[:data.num_nodes//3+data.num_nodes//3], 0.3, 0.1, 42)
    else:#random
train_idx, val_idx, test_idx = split_idx(np.arange(data.num_nodes), 0.1, 0.1, 42)
data.train_mask = generate_mask_tensor(idx2mask(train_idx, data.num_nodes))
data.val_mask = generate_mask_tensor(idx2mask(val_idx, data.num_nodes))
data.test_mask = generate_mask_tensor(idx2mask(test_idx, data.num_nodes))
return data
def split_idx(samples, train_size, val_size, random_state=None):
train, val = train_test_split(samples, train_size=train_size, random_state=random_state)
if isinstance(val_size, float):
val_size *= len(samples) / len(val)
val, test = train_test_split(val, train_size=val_size, random_state=random_state)
return train, val, test
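# Worked example (sketch): split_idx(np.arange(1000), 0.1, 0.1) keeps 100 nodes
# for training; val_size is then rescaled by len(samples)/len(val) = 1000/900,
# so the second split yields ~100 validation and ~800 test nodes (the 1:1:8
# ratio noted above).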
def split_idx1(samples1, samples2, train_size, val_size, random_state=None):
train, val = train_test_split(samples1, train_size=train_size, random_state=random_state)
val = torch.cat((val,samples2))
val, test = train_test_split(val, train_size=val_size, random_state=random_state)
return train, val, test
| 3,365 | 41.607595 | 156 | py |
HyperIMBA | HyperIMBA-main/calculator.py | #Calculate Hyperbolic Embedding
import argparse
import torch
import numpy as np
from models.Poincare import PoincareModel
import dataloader as dl
from torch_geometric.utils import degree, to_networkx
from GraphRicciCurvature.OllivierRicci import OllivierRicci
parser = argparse.ArgumentParser(description='Calculate Hyperbolic Embedding')
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--manifolds', type=str, default='poincare', help="ricci, poincare")
parser.add_argument("--dataset", '-d', type=str, default="Cora", help="all,Cora,Citeseer,Photo,Actor,chameleon,Squirrel")
parser.add_argument("--split", '-s', type=str, default=0, help="Random split train-set")
args = parser.parse_args()
print(args)
dataset,data,_,_,_ = dl.select_dataset(args.dataset, args.split)
if args.manifolds=='ricci':
G = to_networkx(data)
orc = OllivierRicci(G, alpha=0.5, verbose="TRACE")
orc.compute_ricci_curvature()
G_orc = orc.G.copy() # save an intermediate result
curvature="ricciCurvature"
ricci_results = {}
ricci = {}
for i,(n1,n2) in enumerate(list(G_orc.edges()),0):
#ricci_results[i] = G_orc[n1][n2][curvature]
ricci[i] = [int(n1),int(n2),G_orc[n1][n2][curvature]]
weights = [ricci[i] for i in ricci.keys()]
np.savetxt('hyperemb/' + args.dataset + '.edge_list',weights,fmt="%d %d %.16f")
else:
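    # total (in + out) degree of each node, used below (scaled by 0.2) as the
    # node weights for the Poincare embedding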
degrees = np.array(degree(data.edge_index[0],num_nodes=data.num_nodes)+degree(data.edge_index[1],num_nodes=data.num_nodes))
edges_list = list(data.edge_index.t().numpy())
labels = dict(enumerate(data.y.numpy()+1, 0))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dim = 2
model = PoincareModel(edges_list,node_weights=degrees*0.2,node_labels=labels, n_components=dim,eta=0.01,n_negative=10, name="hierarchy", device=device)
model.init_embeddings()
model.train(args.epochs)
weights = model.embeddings
keys = np.array([item for item in model.emb_dict.keys()])
values = np.array([item for item in model.emb_dict.values()])
np.save('hyperemb/' + args.dataset + '_keys.npy', keys)
np.save('hyperemb/' + args.dataset + '_values.npy', values)
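# Example invocations (sketch):
#   python calculator.py -d Cora --manifolds poincare --epochs 1
#   python calculator.py -d Cora --manifolds ricci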
| 2,190 | 41.134615 | 155 | py |
HyperIMBA | HyperIMBA-main/models/GcnHyper.py | from typing import Optional, Tuple
import numpy as np
import torch
from torch import Tensor
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_sparse import SparseTensor, fill_diag, matmul, mul
from torch_sparse import sum as sparsesum
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import zeros
from torch_geometric.typing import Adj, OptTensor, PairTensor
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.utils.num_nodes import maybe_num_nodes
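# NOTE: the import below re-imports Parameter and shadows the
# torch_geometric.nn.dense.linear.Linear imported above with torch.nn.Linear.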
from torch.nn import Sequential as seq, Parameter, LeakyReLU, init, Linear
from torch_geometric.utils import add_self_loops, remove_self_loops, degree, softmax
@torch.jit._overload
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
add_self_loops=True, flow="source_to_target", dtype=None):
# type: (Tensor, OptTensor, Optional[int], bool, bool, str, Optional[int]) -> PairTensor # noqa
pass
@torch.jit._overload
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
add_self_loops=True, flow="source_to_target", dtype=None):
# type: (SparseTensor, OptTensor, Optional[int], bool, bool, str, Optional[int]) -> SparseTensor # noqa
pass
def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False,
add_self_loops=True, flow="source_to_target", dtype=None):
fill_value = 2. if improved else 1.
if isinstance(edge_index, SparseTensor):
assert flow in ["source_to_target"]
adj_t = edge_index
if not adj_t.has_value():
adj_t = adj_t.fill_value(1., dtype=dtype)
if add_self_loops:
adj_t = fill_diag(adj_t, fill_value)
deg = sparsesum(adj_t, dim=1)
deg_inv_sqrt = deg.pow_(-0.5)
deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0.)
adj_t = mul(adj_t, deg_inv_sqrt.view(-1, 1))
adj_t = mul(adj_t, deg_inv_sqrt.view(1, -1))
return adj_t
else:
assert flow in ["source_to_target", "target_to_source"]
num_nodes = maybe_num_nodes(edge_index, num_nodes)
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
if add_self_loops:
edge_index, tmp_edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
assert tmp_edge_weight is not None
edge_weight = tmp_edge_weight
row, col = edge_index[0], edge_index[1]
idx = col if flow == "source_to_target" else row
deg = scatter_add(edge_weight, idx, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow_(-0.5)
deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
class GCNConv(MessagePassing):
r"""The graph convolutional operator from the `"Semi-supervised
Classification with Graph Convolutional Networks"
<https://arxiv.org/abs/1609.02907>`_ paper
.. math::
\mathbf{X}^{\prime} = \mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2} \mathbf{X} \mathbf{\Theta},
where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
adjacency matrix with inserted self-loops and
:math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.
The adjacency matrix can include other values than :obj:`1` representing
edge weights via the optional :obj:`edge_weight` tensor.
Its node-wise formulation is given by:
.. math::
        \mathbf{x}^{\prime}_i = \mathbf{\Theta}^{\top} \sum_{j \in
        \mathcal{N}(i) \cup \{ i \}} \frac{e_{j,i}}{\sqrt{\hat{d}_j
        \hat{d}_i}} \mathbf{x}_j
with :math:`\hat{d}_i = 1 + \sum_{j \in \mathcal{N}(i)} e_{j,i}`, where
:math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to target
node :obj:`i` (default: :obj:`1.0`)
Args:
in_channels (int): Size of each input sample, or :obj:`-1` to derive
the size from the first input(s) to the forward method.
out_channels (int): Size of each output sample.
improved (bool, optional): If set to :obj:`True`, the layer computes
:math:`\mathbf{\hat{A}}` as :math:`\mathbf{A} + 2\mathbf{I}`.
(default: :obj:`False`)
cached (bool, optional): If set to :obj:`True`, the layer will cache
the computation of :math:`\mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2}` on first execution, and will use the
cached version for further executions.
This parameter should only be set to :obj:`True` in transductive
learning scenarios. (default: :obj:`False`)
add_self_loops (bool, optional): If set to :obj:`False`, will not add
self-loops to the input graph. (default: :obj:`True`)
normalize (bool, optional): Whether to add self-loops and compute
symmetric normalization coefficients on the fly.
(default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **input:**
node features :math:`(|\mathcal{V}|, F_{in})`,
edge indices :math:`(2, |\mathcal{E}|)`,
edge weights :math:`(|\mathcal{E}|)` *(optional)*
- **output:** node features :math:`(|\mathcal{V}|, F_{out})`
"""
_cached_edge_index: Optional[Tuple[Tensor, Tensor]]
_cached_adj_t: Optional[SparseTensor]
def __init__(self, in_channels: int, out_channels: int,
k_ricci,e_poinc,n_components,n_components_p,
improved: bool = False, cached: bool = False,
add_self_loops: bool = True, normalize: bool = True,
bias: bool = True, **kwargs):
kwargs.setdefault('aggr', 'add')
super().__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.cached = cached
self.add_self_loops = add_self_loops
self.normalize = normalize
self.k_ricci = k_ricci
self.e_poinc = e_poinc
self._cached_edge_index = None
self._cached_adj_t = None
self.lin = Linear(in_channels, out_channels, bias=False)
widths=[n_components,out_channels]
widths_p=[n_components_p,out_channels]
self.hmpnn=create_wmlp(widths,out_channels,1)
self.ham=create_wmlp(widths_p,out_channels,1)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
self.lin.reset_parameters()
zeros(self.bias)
self._cached_edge_index = None
self._cached_adj_t = None
def forward(self, x: Tensor, edge_index: Adj, alpha_hp: float,
edge_weight: OptTensor = None) -> Tensor:
""""""
if self.normalize:
if isinstance(edge_index, Tensor):
cache = self._cached_edge_index
if cache is None:
edge_index, edge_weight = gcn_norm( # yapf: disable
edge_index, edge_weight, x.size(self.node_dim),
self.improved, self.add_self_loops, self.flow)
if self.cached:
self._cached_edge_index = (edge_index, edge_weight)
else:
edge_index, edge_weight = cache[0], cache[1]
elif isinstance(edge_index, SparseTensor):
cache = self._cached_adj_t
if cache is None:
edge_index = gcn_norm( # yapf: disable
edge_index, edge_weight, x.size(self.node_dim),
self.improved, self.add_self_loops, self.flow)
if self.cached:
self._cached_adj_t = edge_index
else:
edge_index = cache
        if edge_weight is not None:
            edge_weight = edge_weight.view(-1, 1)
x = self.lin(x)
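        # HyperIMBA message weighting: map each edge's Ricci curvature to a
        # weight with a small MLP, normalised across edges sharing a source
        # node via a segment softmax.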
edge_weight=self.hmpnn(self.k_ricci)
edge_weight=softmax(edge_weight,edge_index[0])
# propagate_type: (x: Tensor, edge_weight: OptTensor)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
size=None)
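        # Hierarchy-aware correction: per-node Poincare embeddings mapped
        # through an MLP, blended into the output below with coefficient
        # alpha_hp.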
p_weight=self.ham(self.e_poinc)
p_weight=F.leaky_relu(p_weight)
if self.bias is not None:
out += self.bias
return out+alpha_hp*p_weight
def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
return x_j if edge_weight is None else edge_weight * x_j
def update(self, aggr_out):
return aggr_out
def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
return matmul(adj_t, x, reduce=self.aggr)
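# Stack of Linear->LeakyReLU blocks over `widths`, followed by a final Linear
# projecting to `nfeato` features (`lbias` toggles its bias term).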
def create_wmlp(widths,nfeato,lbias):
mlp_modules=[]
for k in range(len(widths)-1):
mlp_modules.append(Linear(widths[k],widths[k+1],bias=False))
mlp_modules.append(LeakyReLU(0.2,True))
mlp_modules.append(Linear(widths[len(widths)-1],nfeato,bias=lbias))
return seq(*mlp_modules)
class Net(torch.nn.Module):
def __init__(self,data,num_features,num_hidden,num_classes,k_ricci,e_poinc,n_components,n_components_p):
super(Net, self).__init__()
self.conv1 = GCNConv(num_features, num_hidden,k_ricci,e_poinc,n_components,n_components_p, cached=True)
self.conv2 = GCNConv(num_hidden, num_classes, k_ricci,e_poinc,n_components,n_components_p, cached=True)
def forward(self,data,alpha):
x = F.dropout(data.x,p=0.6,training=self.training)
x = self.conv1(x, data.edge_index, alpha)
x = F.elu(x)
x = F.dropout(x,p=0.6,training=self.training)
x = self.conv2(x, data.edge_index, alpha)
return F.log_softmax(x, dim=1)
def num(strings):
try:
return int(strings)
except ValueError:
return float(strings)
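# Load the edge-level Ricci curvatures ('hyperemb/<name>.edge_list') and the
# node-level Poincare embeddings ('hyperemb/<name>_{keys,values}.npy') produced
# by calculator.py, attach them to `data`, and build the model on the device.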
def call(data,name,num_features,num_classes,num_hidden):
#ricci
filename='hyperemb/'+name+'.edge_list'
f=open(filename)
cur_list=list(f)
if name=='Cora' or name == 'Actor' or name=='chameleon' or name=='squirrel':
ricci_cur=[[] for i in range(len(cur_list))]
for i in range(len(cur_list)):
ricci_cur[i]=[num(s) for s in cur_list[i].split(' ',2)]
else:
ricci_cur=[[] for i in range(2*len(cur_list))]
for i in range(len(cur_list)):
ricci_cur[i]=[num(s) for s in cur_list[i].split(' ',2)]
ricci_cur[i+len(cur_list)]=[ricci_cur[i][1],ricci_cur[i][0],ricci_cur[i][2]]
ricci_cur=sorted(ricci_cur)
k_ricci=[i[2] for i in ricci_cur]
k_ricci=k_ricci+[0 for i in range(data.x.size(0))]
k_ricci=torch.tensor(k_ricci, dtype=torch.float)
data.k_ricci=k_ricci.view(-1,1)
data.n_components=1
#poincare
data.edge_index, _ = remove_self_loops(data.edge_index)
keys=np.load('hyperemb/'+name+'_keys.npy')
values=np.load('hyperemb/'+name+'_values.npy')
e_poinc = dict(zip(keys, values))
data.n_components_p = values.shape[1]
alls = dict(enumerate(np.ones((data.num_nodes,data.n_components_p)), 0))
alls.update(e_poinc)
e_poinc = torch.tensor(np.array([alls[i] for i in alls]))
data.e_poinc = e_poinc.to(torch.float32)
data.edge_index, _ = add_self_loops(data.edge_index,num_nodes=data.x.size(0))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data.k_ricci = data.k_ricci.to(device)
data.e_poinc = data.e_poinc.to(device)
data = data.to(device)
model= Net(data,num_features,num_hidden,num_classes,data.k_ricci,data.e_poinc,data.n_components,data.n_components_p).to(device)
return model, data | 12,196 | 40.06734 | 131 | py |
HyperIMBA | HyperIMBA-main/models/SageHyper.py | import numpy as np
import torch
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
from typing import List, Optional, Tuple, Union
import torch.nn.functional as F
from torch import Tensor
from torch.nn import LSTM
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.aggr import Aggregation, MultiAggregation
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, Size
from torch_geometric.utils import add_self_loops, remove_self_loops,degree,softmax
class SAGEConv(MessagePassing):
r"""The GraphSAGE operator from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + \mathbf{W}_2 \cdot
\mathrm{mean}_{j \in \mathcal{N(i)}} \mathbf{x}_j
If :obj:`project = True`, then :math:`\mathbf{x}_j` will first get
projected via
.. math::
\mathbf{x}_j \leftarrow \sigma ( \mathbf{W}_3 \mathbf{x}_j +
\mathbf{b})
as described in Eq. (3) of the paper.
Args:
in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
derive the size from the first input(s) to the forward method.
A tuple corresponds to the sizes of source and target
dimensionalities.
out_channels (int): Size of each output sample.
aggr (string or Aggregation, optional): The aggregation scheme to use.
Any aggregation of :obj:`torch_geometric.nn.aggr` can be used,
*e.g.*, :obj:`"mean"`, :obj:`"max"`, or :obj:`"lstm"`.
(default: :obj:`"mean"`)
normalize (bool, optional): If set to :obj:`True`, output features
will be :math:`\ell_2`-normalized, *i.e.*,
:math:`\frac{\mathbf{x}^{\prime}_i}
{\| \mathbf{x}^{\prime}_i \|_2}`.
(default: :obj:`False`)
root_weight (bool, optional): If set to :obj:`False`, the layer will
not add transformed root node features to the output.
(default: :obj:`True`)
project (bool, optional): If set to :obj:`True`, the layer will apply a
linear transformation followed by an activation function before
aggregation (as described in Eq. (3) of the paper).
(default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **inputs:**
node features :math:`(|\mathcal{V}|, F_{in})` or
:math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
if bipartite,
edge indices :math:`(2, |\mathcal{E}|)`
- **outputs:** node features :math:`(|\mathcal{V}|, F_{out})` or
:math:`(|\mathcal{V_t}|, F_{out})` if bipartite
"""
def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
k_ricci,e_poinc,n_components,n_components_p,
aggr: Optional[Union[str, List[str], Aggregation]] = "mean",
normalize: bool = False,
root_weight: bool = True,
project: bool = False,
bias: bool = True,
**kwargs,
):
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.root_weight = root_weight
self.project = project
self.k_ricci = k_ricci
self.e_poinc = e_poinc
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
if aggr == 'lstm':
kwargs.setdefault('aggr_kwargs', {})
kwargs['aggr_kwargs'].setdefault('in_channels', in_channels[0])
kwargs['aggr_kwargs'].setdefault('out_channels', in_channels[0])
super().__init__(aggr, **kwargs)
widths=[n_components,out_channels]
widths_p=[n_components_p,out_channels]
self.hmpnn=create_wmlp(widths,in_channels[0],1)
self.ham=create_wmlp(widths_p,out_channels,1)
if self.project:
self.lin = Linear(in_channels[0], in_channels[0], bias=True)
if self.aggr is None:
self.fuse = False # No "fused" message_and_aggregate.
self.lstm = LSTM(in_channels[0], in_channels[0], batch_first=True)
if isinstance(self.aggr_module, MultiAggregation):
aggr_out_channels = self.aggr_module.get_out_channels(
in_channels[0])
else:
aggr_out_channels = in_channels[0]
self.lin_l = Linear(aggr_out_channels, out_channels, bias=bias)
if self.root_weight:
self.lin_r = Linear(in_channels[1], out_channels, bias=False)
self.reset_parameters()
def reset_parameters(self):
if self.project:
self.lin.reset_parameters()
self.aggr_module.reset_parameters()
self.lin_l.reset_parameters()
if self.root_weight:
self.lin_r.reset_parameters()
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, alpha_hp: float,
size: Size = None) -> Tensor:
""""""
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
if self.project and hasattr(self, 'lin'):
x = (self.lin(x[0]).relu(), x[1])
# propagate_type: (x: OptPairTensor)
out_weight=self.hmpnn(self.k_ricci)
out_weight=softmax(out_weight,edge_index[0])
out = self.propagate(x=x,edge_index=edge_index,out_weight=out_weight)
out = self.lin_l(out)
p_weight=self.ham(self.e_poinc)
p_weight=F.leaky_relu(p_weight)
out = out+alpha_hp*p_weight
x_r = x[1]
if self.root_weight and x_r is not None:
out += self.lin_r(x_r)
if self.normalize:
out = F.normalize(out, p=2., dim=-1)
return out
def message(self, x_j: Tensor, out_weight: Tensor) -> Tensor:
return out_weight*x_j
def message_and_aggregate(self, adj_t: SparseTensor,
x: OptPairTensor) -> Tensor:
adj_t = adj_t.set_value(None, layout=None)
return matmul(adj_t, x[0], reduce=self.aggr)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, aggr={self.aggr})')
def create_wmlp(widths,nfeato,lbias):
mlp_modules=[]
for k in range(len(widths)-1):
mlp_modules.append(Linear(widths[k],widths[k+1],bias=False))
mlp_modules.append(LeakyReLU(0.2,True))
mlp_modules.append(Linear(widths[len(widths)-1],nfeato,bias=lbias))
return seq(*mlp_modules)
class Net(torch.nn.Module):
def __init__(self,data,num_features,num_hidden,num_classes,k_ricci,e_poinc,n_components,n_components_p):
super(Net, self).__init__()
self.conv1 = SAGEConv(num_features, num_hidden,k_ricci,e_poinc,n_components,n_components_p)
self.conv2 = SAGEConv(num_hidden, num_classes,k_ricci,e_poinc,n_components,n_components_p)
def forward(self, data, alpha):
x, edge_index = data.x, data.edge_index
x = F.dropout(x, p=0.6, training=self.training)
x = F.relu(self.conv1(x, edge_index, alpha))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, alpha)
return F.log_softmax(x, dim=1)
def num(strings):
try:
return int(strings)
except ValueError:
return float(strings)
def call(data,name,num_features,num_classes,num_hidden):
#ricci
filename='hyperemb/'+name+'.edge_list'
f=open(filename)
cur_list=list(f)
if name=='Cora' or name == 'Actor' or name=='chameleon' or name=='squirrel':
ricci_cur=[[] for i in range(len(cur_list))]
for i in range(len(cur_list)):
ricci_cur[i]=[num(s) for s in cur_list[i].split(' ',2)]
else:
ricci_cur=[[] for i in range(2*len(cur_list))]
for i in range(len(cur_list)):
ricci_cur[i]=[num(s) for s in cur_list[i].split(' ',2)]
ricci_cur[i+len(cur_list)]=[ricci_cur[i][1],ricci_cur[i][0],ricci_cur[i][2]]
ricci_cur=sorted(ricci_cur)
k_ricci=[i[2] for i in ricci_cur]
k_ricci=k_ricci+[0 for i in range(data.x.size(0))]
k_ricci=torch.tensor(k_ricci, dtype=torch.float)
data.k_ricci=k_ricci.view(-1,1)
data.n_components=1
#poincare
data.edge_index, _ = remove_self_loops(data.edge_index)
keys=np.load('hyperemb/'+name+'_keys.npy')
values=np.load('hyperemb/'+name+'_values.npy')
e_poinc = dict(zip(keys, values))
data.n_components_p = values.shape[1]
alls = dict(enumerate(np.ones((data.num_nodes,data.n_components_p)), 0))
alls.update(e_poinc)
e_poinc = torch.tensor(np.array([alls[i] for i in alls]))
data.e_poinc = e_poinc.to(torch.float32)
data.edge_index, _ = add_self_loops(data.edge_index,num_nodes=data.x.size(0))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data.k_ricci = data.k_ricci.to(device)
data.e_poinc = data.e_poinc.to(device)
data = data.to(device)
model= Net(data,num_features,num_hidden,num_classes,data.k_ricci,data.e_poinc,data.n_components,data.n_components_p).to(device)
return model, data
| 9,477 | 38.327801 | 131 | py |
HyperIMBA | HyperIMBA-main/models/GatHyper.py | from typing import Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from torch_sparse import SparseTensor, set_diag
import math
import numpy as np
from typing import Any
from torch.nn import Sequential as seq, Parameter,LeakyReLU,init,Linear
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import (
Adj,
NoneType,
OptPairTensor,
OptTensor,
Size,
)
from torch_geometric.utils import add_self_loops, remove_self_loops, softmax
#from ..inits import glorot, zeros
class GATConv(MessagePassing):
def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
heads,
k_ricci,e_poinc,n_components,n_components_p,
concat: bool = True,
negative_slope: float = 0.2,
dropout: float = 0.0,
add_self_loops: bool = True,
edge_dim: Optional[int] = None,
fill_value: Union[float, Tensor, str] = 'mean',
bias: bool = True,
**kwargs,
):
kwargs.setdefault('aggr', 'add')
super().__init__(node_dim=0, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.concat = concat
self.negative_slope = negative_slope
self.dropout = dropout
self.add_self_loops = add_self_loops
self.edge_dim = edge_dim
self.fill_value = fill_value
self.k_ricci = k_ricci
self.e_poinc = e_poinc
widths=[n_components,out_channels]
widths_p=[n_components_p,out_channels*heads]
self.hmpnn=create_wmlp(widths,out_channels,1)
self.ham=create_wmlp(widths_p,out_channels*heads,1)
# In case we are operating in bipartite graphs, we apply separate
# transformations 'lin_src' and 'lin_dst' to source and target nodes:
if isinstance(in_channels, int):
self.lin_src = Linear(in_channels, heads * out_channels,
bias=False, weight_initializer='glorot')
self.lin_dst = self.lin_src
else:
self.lin_src = Linear(in_channels[0], heads * out_channels, False,
weight_initializer='glorot')
self.lin_dst = Linear(in_channels[1], heads * out_channels, False,
weight_initializer='glorot')
# The learnable parameters to compute attention coefficients:
self.att_src = Parameter(torch.Tensor(1, heads, out_channels))
self.att_dst = Parameter(torch.Tensor(1, heads, out_channels))
if edge_dim is not None:
self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False,
weight_initializer='glorot')
self.att_edge = Parameter(torch.Tensor(1, heads, out_channels))
else:
self.lin_edge = None
self.register_parameter('att_edge', None)
if bias and concat:
self.bias = Parameter(torch.Tensor(heads * out_channels))
elif bias and not concat:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
self.lin_src.reset_parameters()
self.lin_dst.reset_parameters()
if self.lin_edge is not None:
self.lin_edge.reset_parameters()
glorot(self.att_src)
glorot(self.att_dst)
glorot(self.att_edge)
zeros(self.bias)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, alpha_hp: float,
edge_attr: OptTensor = None, size: Size = None,
return_attention_weights=None):
H, C = self.heads, self.out_channels
# We first transform the input node features. If a tuple is passed, we
# transform source and target node features via separate weights:
if isinstance(x, Tensor):
assert x.dim() == 2, "Static graphs not supported in 'GATConv'"
x_src = x_dst = self.lin_src(x).view(-1, H, C)
else: # Tuple of source and target node features:
x_src, x_dst = x
assert x_src.dim() == 2, "Static graphs not supported in 'GATConv'"
x_src = self.lin_src(x_src).view(-1, H, C)
if x_dst is not None:
x_dst = self.lin_dst(x_dst).view(-1, H, C)
x = (x_src, x_dst)
# Next, we compute node-level attention coefficients, both for source
# and target nodes (if present):
alpha_src = (x_src * self.att_src).sum(dim=-1)
alpha_dst = None if x_dst is None else (x_dst * self.att_dst).sum(-1)
alpha = (alpha_src, alpha_dst)
if self.add_self_loops:
if isinstance(edge_index, Tensor):
# We only want to add self-loops for nodes that appear both as
# source and target nodes:
num_nodes = x_src.size(0)
if x_dst is not None:
num_nodes = min(num_nodes, x_dst.size(0))
num_nodes = min(size) if size is not None else num_nodes
edge_index, edge_attr = remove_self_loops(
edge_index, edge_attr)
edge_index, edge_attr = add_self_loops(
edge_index, edge_attr, fill_value=self.fill_value,
num_nodes=num_nodes)
elif isinstance(edge_index, SparseTensor):
if self.edge_dim is None:
edge_index = set_diag(edge_index)
else:
raise NotImplementedError(
"The usage of 'edge_attr' and 'add_self_loops' "
"simultaneously is currently not yet supported for "
"'edge_index' in a 'SparseTensor' form")
# edge_updater_type: (alpha: OptPairTensor, edge_attr: OptTensor)
alpha = self.edge_updater(edge_index, alpha=alpha, edge_attr=edge_attr)
# propagate_type: (x: OptPairTensor, alpha: Tensor)
#hyperIMBA
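        # Curvature-derived edge weights replace the standard GAT attention
        # coefficients here; the commented-out line below would instead add
        # them to the learned attention.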
out_weight=self.hmpnn(self.k_ricci)
out_weight=softmax(out_weight,edge_index[0])
alpha = out_weight
# alpha = alpha+out_weight
out = self.propagate(edge_index, x=x, alpha=alpha, size=size)
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
p_weight=self.ham(self.e_poinc)
p_weight=F.leaky_relu(p_weight)
out = out+alpha_hp*p_weight
if isinstance(return_attention_weights, bool):
if isinstance(edge_index, Tensor):
return out, (edge_index, alpha)
elif isinstance(edge_index, SparseTensor):
return out, edge_index.set_value(alpha, layout='coo')
else:
return out
def edge_update(self, alpha_j: Tensor, alpha_i: OptTensor,
edge_attr: OptTensor, index: Tensor, ptr: OptTensor,
size_i: Optional[int]) -> Tensor:
# Given edge-level attention coefficients for source and target nodes,
# we simply need to sum them up to "emulate" concatenation:
alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
if edge_attr is not None and self.lin_edge is not None:
if edge_attr.dim() == 1:
edge_attr = edge_attr.view(-1, 1)
edge_attr = self.lin_edge(edge_attr)
edge_attr = edge_attr.view(-1, self.heads, self.out_channels)
alpha_edge = (edge_attr * self.att_edge).sum(dim=-1)
alpha = alpha + alpha_edge
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, index, ptr, size_i)
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
return alpha
def message(self, x_j: Tensor, alpha: Tensor) -> Tensor:
return alpha.unsqueeze(1) * x_j
#return alpha.unsqueeze(-1) * x_j
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, heads={self.heads})')
def glorot(value: Any):
if isinstance(value, Tensor):
stdv = math.sqrt(6.0 / (value.size(-2) + value.size(-1)))
value.data.uniform_(-stdv, stdv)
else:
for v in value.parameters() if hasattr(value, 'parameters') else []:
glorot(v)
for v in value.buffers() if hasattr(value, 'buffers') else []:
glorot(v)
def zeros(value: Any):
constant(value, 0.)
def constant(value: Any, fill_value: float):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in value.parameters() if hasattr(value, 'parameters') else []:
constant(v, fill_value)
for v in value.buffers() if hasattr(value, 'buffers') else []:
constant(v, fill_value)
def create_wmlp(widths,nfeato,lbias):
mlp_modules=[]
for k in range(len(widths)-1):
mlp_modules.append(Linear(widths[k],widths[k+1],bias=False))
mlp_modules.append(LeakyReLU(0.2,True))
mlp_modules.append(Linear(widths[len(widths)-1],nfeato,bias=lbias))
return seq(*mlp_modules)
class Net(torch.nn.Module):
def __init__(self,data,num_features,num_hidden,heads,num_classes,k_ricci,e_poinc,n_components,n_components_p):
super(Net, self).__init__()
self.conv1 = GATConv(num_features, num_hidden, heads,k_ricci,e_poinc,n_components,n_components_p)
self.conv2 = GATConv(num_hidden * heads, num_classes, 1,k_ricci,e_poinc,n_components,n_components_p,concat=False)
def forward(self, data, alpha):
x, edge_index = data.x, data.edge_index
x = F.dropout(x, p=0.6, training=self.training)
x = F.relu(self.conv1(x, edge_index, alpha))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, alpha)
return F.log_softmax(x, dim=1)
def num(strings):
try:
return int(strings)
except ValueError:
return float(strings)
def call(data,name,num_features,num_classes,num_hidden):
#ricci
filename='hyperemb/'+name+'.edge_list'
f=open(filename)
cur_list=list(f)
if name=='Cora' or name == 'Actor' or name=='chameleon' or name=='squirrel':
ricci_cur=[[] for i in range(len(cur_list))]
for i in range(len(cur_list)):
ricci_cur[i]=[num(s) for s in cur_list[i].split(' ',2)]
else:
ricci_cur=[[] for i in range(2*len(cur_list))]
for i in range(len(cur_list)):
ricci_cur[i]=[num(s) for s in cur_list[i].split(' ',2)]
ricci_cur[i+len(cur_list)]=[ricci_cur[i][1],ricci_cur[i][0],ricci_cur[i][2]]
ricci_cur=sorted(ricci_cur)
k_ricci=[i[2] for i in ricci_cur]
k_ricci=k_ricci+[0 for i in range(data.x.size(0))]
k_ricci=torch.tensor(k_ricci, dtype=torch.float)
data.k_ricci=k_ricci.view(-1,1)
data.n_components=1
#poincare
data.edge_index, _ = remove_self_loops(data.edge_index)
keys=np.load('hyperemb/'+name+'_keys.npy')
values=np.load('hyperemb/'+name+'_values.npy')
e_poinc = dict(zip(keys, values))
data.n_components_p = values.shape[1]
alls = dict(enumerate(np.ones((data.num_nodes,data.n_components_p)), 0))
alls.update(e_poinc)
e_poinc = torch.tensor(np.array([alls[i] for i in alls]))
data.e_poinc = e_poinc.to(torch.float32)
data.edge_index, _ = add_self_loops(data.edge_index,num_nodes=data.x.size(0))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data.k_ricci = data.k_ricci.to(device)
data.e_poinc = data.e_poinc.to(device)
data = data.to(device)
model= Net(data,num_features,num_hidden//8,8,num_classes,data.k_ricci,data.e_poinc,data.n_components,data.n_components_p).to(device)
return model, data
| 12,092 | 39.043046 | 136 | py |
HyperIMBA | HyperIMBA-main/models/Poincare.py | import time
import networkx as nx
import tqdm
import numpy as np
def norm(x, axis=None):
return np.linalg.norm(x, axis=axis)
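# Poincare distance between points u, v in the open unit ball:
#   d(u, v) = arccosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)));
# eps keeps the denominator away from zero near the boundary.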
def poincare_dist(u, v, eps=1e-5):
d = 1 + 2 * norm(u-v)**2 / ((1 - norm(u)**2) * (1 - norm(v)**2) + eps)
return np.arccosh(d)
class PoincareModel():
def __init__(self, relations, node_weights, node_labels, n_components=2, eta=0.01, n_negative=10,
eps=1e-5, burn_in=10, burn_in_eta=0.01, init_lower=-0.001,
init_upper=0.001, dtype=np.float64, seed=0, name="", device='cuda', batch_size=None):
self.relations = relations
self.n_components = n_components
self.eta = eta # Learning rate for training
self.burn_in_eta = burn_in_eta # Learning rate for burn-in
self.n_negative = n_negative
self.eps = eps
self.burn_in = burn_in
self.dtype = dtype
self.init_lower = init_lower
self.init_upper = init_upper
self.node_weights = node_weights
self.node_labels = node_labels
self.network = nx.Graph()
self.name = name
self.device = device
self.batch_size = batch_size
def init_embeddings(self):
unique_nodes = np.unique([item for sublist in self.relations for item in sublist])
theta_init = np.random.uniform(self.init_lower, self.init_upper,
size=(len(unique_nodes), self.n_components))
embedding_dict = dict(zip(unique_nodes, theta_init))
self.nodes = unique_nodes
self.embeddings = theta_init
self.emb_dict = embedding_dict
def negative_sample(self, u):
positives = [x[1] for x in self.relations if x[0] == u]
negatives = np.array([x for x in self.nodes if x not in positives])
random_ix = np.random.permutation(len(negatives))[:self.n_negative]
neg_samples = [[u, x] for x in negatives[random_ix]]
neg_samples.append([u,u])
return neg_samples
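    # Partial derivative of the Poincare distance d(theta, x) with respect to
    # theta (cf. Eq. (4) in Nickel & Kiela, "Poincare Embeddings for Learning
    # Hierarchical Representations", 2017).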
def partial_d(self, theta, x):
alpha = 1 - norm(theta)**2
beta = 1 - norm(x)**2
gamma = 1 + 2/(alpha*beta + self.eps) * norm(theta-x)**2
lhs = 4 / (beta*np.sqrt(gamma**2 - 1) + self.eps)
rhs = 1/(alpha**2 + self.eps) * (norm(x)**2 - 2*np.inner(theta,x) + 1) * theta - x/(alpha + self.eps)
return lhs*rhs
def proj(self, theta):
if norm(theta) >= 1:
theta = theta/norm(theta) - self.eps
return theta
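    # Riemannian SGD step: scale the Euclidean gradient by the inverse Poincare
    # metric, (1 - ||theta||^2)^2 / 4, then project back into the unit ball.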
def update(self, u, grad):
theta = self.emb_dict[u]
step = 1/4 * self.eta*(1 - norm(theta)**2)**2 * grad
self.emb_dict[u] = self.proj(theta - step)
def train(self, num_epochs=10,edge_index=None):
node_rank = {}
for v in self.node_labels:
node_rank[v] = 1/self.node_labels[v]
if edge_index is not None:
self.relations = edge_index
for i in range(num_epochs):
losses=0
start = time.time()
for relation in tqdm.tqdm(self.relations):
u, v = relation[0], relation[1]
if u == v:
continue
# embedding vectors (theta, x) for relation (u, v)
theta, x = self.emb_dict[u], self.emb_dict[v]
# embedding vectors v' in sample negative relations (u, v')
neg_relations = [x[1] for x in self.negative_sample(u)]
neg_embed = np.array([self.emb_dict[x] for x in neg_relations])
# find partial derivatives of poincare distance
dd_theta = np.zeros(self.n_components)
dd_x = np.zeros(self.n_components)
                # Differentiate w.r.t. the lower-ranked endpoint, consistent
                # with the negative-sampling updates below.
                if node_rank[u] < node_rank[v]:
                    dd_theta = self.partial_d(theta, x)
                else:
                    dd_x = self.partial_d(x, theta)
                if np.isnan(dd_theta).any() or np.isinf(dd_theta).any() or np.isnan(dd_x).any() or np.isinf(dd_x).any():
                    return
# find partial derivatives of loss function
dloss_theta = -1
dloss_x = -1
if node_rank[u] < node_rank[v]:
grad_theta = dloss_theta * dd_theta
self.update(u, grad_theta)
else:
grad_x = dloss_x * dd_x
self.update(v, grad_x)
# find gradients for negative samples
neg_loss = 0
neg_exp_dist = np.array([np.exp(-poincare_dist(theta, v_prime)) for v_prime in neg_embed])
Z = neg_exp_dist.sum(axis=0)
for vprime in neg_relations:
dloss_u = np.zeros(self.n_components)
if node_rank[u] < node_rank[vprime]:
dd_u = self.partial_d(theta, self.emb_dict[vprime])
dloss_u = -np.exp(-poincare_dist(theta, self.emb_dict[vprime])) / Z
grad_u = dd_u * dloss_u
self.update(u, grad_u)
loss = dloss_u
else:
dd_vprime = self.partial_d(self.emb_dict[vprime], theta)
dloss_vprime = -np.exp(-poincare_dist(self.emb_dict[vprime], theta)) / Z
grad_vprime = dd_vprime * dloss_vprime
self.update(vprime, grad_vprime)
loss = dloss_vprime
neg_loss += loss
pos_loss = np.exp(-poincare_dist(theta, x))
losses = -(losses)+(pos_loss+neg_loss)
| 5,663 | 41.268657 | 120 | py |
larq | larq-main/setup.py | from setuptools import find_packages, setup
def readme():
with open("README.md") as f:
return f.read()
setup(
name="larq",
version="0.13.3",
python_requires=">=3.7",
author="Plumerai",
author_email="opensource@plumerai.com",
description="An Open Source Machine Learning Library for Training Binarized Neural Networks",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://larq.dev/",
packages=find_packages(exclude=["larq.snapshots"]),
license="Apache 2.0",
install_requires=[
"numpy >= 1.15.4, < 2.0",
"terminaltables>=3.1.0",
"importlib-metadata >= 2.0, < 4.0 ; python_version<'3.8'",
"packaging>=19.2",
],
extras_require={
"tensorflow": ["tensorflow>=1.14.0"],
"tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
"test": [
"pytest==7.4.*",
"pytest-cov>=4.0,<4.2",
"pytest-xdist==3.2.*",
"pytest-mock==3.11.*",
"snapshottest==0.6.*",
],
"lint": [
"black==23.7.0",
"flake8==6.0.*",
"isort==5.11.*",
"pytype==2022.10.26",
],
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 2,122 | 32.171875 | 97 | py |
larq | larq-main/larq/optimizers_test.py | import numpy as np
import pytest
import tensorflow as tf
from packaging import version
from tensorflow import keras
from tensorflow.python.keras import testing_utils
import larq as lq
from larq import testing_utils as lq_testing_utils
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.keras.optimizers import legacy as optimizers # type: ignore
else:
from tensorflow.keras import optimizers # type: ignore
def _test_optimizer(
optimizer, target=0.75, test_kernels_are_binary=True, trainable_bn=True
):
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=1000, test_samples=0, input_shape=(10,), num_classes=2
)
y_train = keras.utils.to_categorical(y_train)
model = lq_testing_utils.get_small_bnn_model(
x_train.shape[1], 20, y_train.shape[1], trainable_bn=trainable_bn
)
model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["acc"])
initial_vars = [tf.keras.backend.get_value(w) for w in model.trainable_weights]
history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
trained_vars = [tf.keras.backend.get_value(w) for w in model.trainable_weights]
# check all trainable variables have actually been updated
for v0, v1 in zip(initial_vars, trained_vars):
assert not np.all(v0 == v1)
# Note that when kernels are treated as latent weights they need not be
# binary (see https://arxiv.org/abs/1906.02107 for further discussion)
if test_kernels_are_binary:
for layer in model.layers:
if "quant" in layer.name:
for weight in layer.trainable_weights:
assert np.all(np.isin(tf.keras.backend.get_value(weight), [-1, 1]))
assert history.history["acc"][-1] >= target
def _test_serialization(optimizer):
config = keras.optimizers.serialize(optimizer)
optim = keras.optimizers.deserialize(config)
new_config = keras.optimizers.serialize(optim)
assert config == new_config
class TestCaseOptimizer:
def test_type_check_predicate(self):
with pytest.raises(TypeError):
# pytype: disable=wrong-arg-types
lq.optimizers.CaseOptimizer((False, lq.optimizers.Bop()))
# pytype: enable=wrong-arg-types
def test_type_check_optimizer(self):
with pytest.raises(TypeError):
lq.optimizers.CaseOptimizer((lq.optimizers.Bop.is_binary_variable, False))
def test_type_check_default(self):
with pytest.raises(TypeError):
lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=False,
)
def test_overlapping_predicates(self):
with pytest.raises(ValueError):
naughty_case_opt = lq.optimizers.CaseOptimizer(
(lambda var: True, lq.optimizers.Bop()),
(lambda var: True, lq.optimizers.Bop()),
)
_test_optimizer(naughty_case_opt)
def test_missing_default(self):
with pytest.warns(Warning):
naughty_case_opt = lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
)
# Simple MNIST model
mnist = tf.keras.datasets.mnist
(train_images, train_labels), _ = mnist.load_data()
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
lq.layers.QuantDense(
64,
input_quantizer="ste_sign",
kernel_quantizer=lq.quantizers.NoOp(precision=1),
activation="relu",
),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=naughty_case_opt,
metrics=["acc"],
)
# Should raise on first call to apply_gradients()
model.fit(train_images[:1], train_labels[:1], epochs=1)
def test_wrong_predicate(self):
"""Make sure we throw when an optimizer does not claim variables."""
with pytest.raises(ValueError):
naughty_case_opt = lq.optimizers.CaseOptimizer(
(lambda var: False, lq.optimizers.Bop()),
default_optimizer=optimizers.Adam(0.01),
)
# Simple MNIST model
mnist = tf.keras.datasets.mnist
(train_images, train_labels), _ = mnist.load_data()
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=naughty_case_opt,
metrics=["acc"],
)
# Should raise on first call to apply_gradients()
model.fit(train_images[:1], train_labels[:1], epochs=1)
def test_weights(self):
(train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
lq.layers.QuantDense(
64,
input_quantizer="ste_sign",
kernel_quantizer=lq.quantizers.NoOp(precision=1),
activation="relu",
),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=optimizers.SGD(0.1, momentum=0.9),
),
)
model.fit(train_images[:1], train_labels[:1], epochs=1)
opt_weights = model.optimizer.weights
# SGD with momentum and Bop both create a single momentum variable per weight
# and one variable each to keep track of iterations
assert len(opt_weights) == len(model.weights) + 2
checked_weights = 0
for opt in model.optimizer.optimizers:
for weight in opt.weights:
assert weight is opt_weights[checked_weights]
checked_weights += 1
assert checked_weights == len(opt_weights)
@pytest.mark.usefixtures("eager_mode")
def test_checkpoint(self, tmp_path):
# Build and run a simple model.
var = tf.Variable([2.0])
opt = optimizers.SGD(1.0, momentum=1.0)
opt = lq.optimizers.CaseOptimizer((lambda var: True, opt))
opt.minimize(lambda: var + 1.0, var_list=[var])
slot_var = opt.optimizers[0].get_slot(var, "momentum")
slot_value = slot_var.numpy().item()
# Save a checkpoint.
checkpoint = tf.train.Checkpoint(optimizer=opt, var=var)
save_path = checkpoint.save(tmp_path / "ckpt")
# Run model again.
opt.minimize(lambda: var + 1.0, var_list=[var])
assert slot_var.numpy().item() != slot_value
# Load checkpoint and ensure loss scale is back to its original value.
status = checkpoint.restore(save_path)
status.assert_consumed()
status.run_restore_ops()
assert slot_var.numpy().item() == slot_value
class TestBopOptimizer:
def test_bop_accuracy(self):
_test_optimizer(
lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=optimizers.Adam(0.01),
),
test_kernels_are_binary=True,
)
# test optimizer on model with only binary trainable vars (low accuracy)
_test_optimizer(
lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=optimizers.Adam(0.01),
),
test_kernels_are_binary=True,
trainable_bn=False,
target=0,
)
@pytest.mark.usefixtures("distribute_scope")
def test_mixed_precision(self):
opt = lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=optimizers.Adam(0.01),
)
try:
opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
except AttributeError:
opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
opt, "dynamic"
)
_test_optimizer(opt, test_kernels_are_binary=True)
def test_bop_tf_1_14_schedules(self):
_test_optimizer(
lq.optimizers.CaseOptimizer(
(
lq.optimizers.Bop.is_binary_variable,
lq.optimizers.Bop(
threshold=tf.keras.optimizers.schedules.InverseTimeDecay(
3.0, decay_steps=1.0, decay_rate=0.5
),
gamma=tf.keras.optimizers.schedules.InverseTimeDecay(
3.0, decay_steps=1.0, decay_rate=0.5
),
),
),
default_optimizer=optimizers.Adam(0.01),
),
test_kernels_are_binary=True,
)
def test_bop_serialization(self):
_test_serialization(
lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=optimizers.Adam(0.01),
),
)
@pytest.mark.parametrize(
"hyper",
[5e-4, tf.keras.optimizers.schedules.PolynomialDecay(5e-4, 100)],
)
def test_bop_serialization_schedule(self, hyper):
bop = lq.optimizers.Bop(
gamma=hyper,
threshold=hyper,
)
new_bop = lq.optimizers.Bop.from_config(bop.get_config())
assert isinstance(new_bop._get_hyper("gamma"), type(bop._get_hyper("gamma")))
assert isinstance(
new_bop._get_hyper("threshold"), type(bop._get_hyper("threshold"))
)
| 10,528 | 37.01083 | 88 | py |
larq | larq-main/larq/callbacks.py | from typing import Any, Callable, MutableMapping, Optional
from tensorflow import keras
class HyperparameterScheduler(keras.callbacks.Callback):
"""Generic hyperparameter scheduler.
!!! example
```python
bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
adam = tf.keras.optimizers.Adam(0.01)
optimizer = lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, bop), default_optimizer=adam,
)
callbacks = [
HyperparameterScheduler(lambda x: 0.001 * (0.1 ** (x // 30)), "gamma", bop)
]
```
# Arguments
        schedule: a function that takes an epoch or step index as input
            (integer, indexed from 0), and optionally the current hyperparameter
            value as a second argument, and returns the new hyperparameter value
            as output.
hyperparameter: str. the name of the hyperparameter to be scheduled.
optimizer: the optimizer that contains the hyperparameter that will be scheduled.
Defaults to `self.model.optimizer` if `optimizer == None`.
update_freq: str (optional), denotes on what update_freq to change the
hyperparameter. Can be either "epoch" (default) or "step".
verbose: int. 0: quiet, 1: update messages.
log_name: str (optional), under which name to log this hyperparameter to
Tensorboard. If `None`, defaults to `hyperparameter`. Use this if you have
several schedules for the same hyperparameter on different optimizers.
"""
def __init__(
self,
schedule: Callable,
hyperparameter: str,
optimizer: Optional[keras.optimizers.Optimizer] = None,
update_freq: str = "epoch",
verbose: int = 0,
log_name: Optional[str] = None,
):
super().__init__()
self.optimizer = optimizer
self.schedule = schedule
self.hyperparameter = hyperparameter
self.log_name = log_name or hyperparameter
self.verbose = verbose
if update_freq not in ["epoch", "step"]:
raise ValueError(
"HyperparameterScheduler.update_freq can only be 'step' or 'epoch'."
f" Received value '{update_freq}'"
)
self.update_freq = update_freq
def set_model(self, model: keras.models.Model) -> None:
super().set_model(model)
if self.optimizer is None:
# It is not possible for a model to reach this state and not have
# an optimizer, so we can safely access it here.
self.optimizer = model.optimizer
if not hasattr(self.optimizer, self.hyperparameter):
raise ValueError(
f'Optimizer must have a "{self.hyperparameter}" attribute.'
)
def set_hyperparameter(self, t: int) -> Any:
hp = getattr(self.optimizer, self.hyperparameter)
try: # new API
hyperparameter_val = keras.backend.get_value(hp)
hyperparameter_val = self.schedule(t, hyperparameter_val)
except TypeError: # Support for old API for backward compatibility
hyperparameter_val = self.schedule(t)
keras.backend.set_value(hp, hyperparameter_val)
return hp
def on_batch_begin(
self, batch: int, logs: Optional[MutableMapping[str, Any]] = None
) -> None:
if not self.update_freq == "step":
return
# We use optimizer.iterations (i.e. global step), since batch only
# reflects the batch index in the current epoch.
batch = keras.backend.get_value(self.optimizer.iterations)
hp = self.set_hyperparameter(batch)
if self.verbose > 0:
print(
f"Batch {batch}: {self.log_name} is now {keras.backend.get_value(hp)}."
)
def on_epoch_begin(
self, epoch: int, logs: Optional[MutableMapping[str, Any]] = None
) -> None:
if not self.update_freq == "epoch":
return
hp = self.set_hyperparameter(epoch)
if self.verbose > 0:
print(
f"Epoch {epoch}: {self.log_name} is now {keras.backend.get_value(hp)}."
)
def on_epoch_end(
self, epoch: int, logs: Optional[MutableMapping[str, Any]] = None
) -> None:
logs = logs or {}
hp = getattr(self.optimizer, self.hyperparameter)
logs[self.log_name] = keras.backend.get_value(hp)
| 4,375 | 36.724138 | 89 | py |
larq | larq-main/larq/quantizers.py | """A Quantizer defines the way of transforming a full precision input to a
quantized output and the pseudo-gradient method used for the backwards pass.
Quantizers can either be used through quantizer arguments that are supported
for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they
can be used similar to activations, i.e. either through an `Activation` layer,
or through the `activation` argument supported by all forward layers:
```python
import tensorflow as tf
import larq as lq
...
x = lq.layers.QuantDense(64, activation=None)(x)
x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(x)
```
is equivalent to:
```python
x = lq.layers.QuantDense(64)(x)
x = tf.keras.layers.Activation("ste_sign")(x)
x = lq.layers.QuantDense(64)(x)
```
as well as:
```python
x = lq.layers.QuantDense(64, activation="ste_sign")(x)
x = lq.layers.QuantDense(64)(x)
```
We highly recommend using the first of these formulations: for the
other two formulations, intermediate layers - like batch normalization or
average pooling - and shortcut connections may result in non-binary input
to the convolutions.
Quantizers can either be referenced by string or called directly.
The following usages are equivalent:
```python
lq.layers.QuantDense(64, kernel_quantizer="ste_sign")
```
```python
lq.layers.QuantDense(64, kernel_quantizer=lq.quantizers.SteSign(clip_value=1.0))
```
"""
from typing import Callable, Union
import tensorflow as tf
from packaging import version
from larq import context, math
from larq import metrics as lq_metrics
from larq import utils
__all__ = [
"ApproxSign",
"DoReFa",
"DoReFaQuantizer",
"MagnitudeAwareSign",
"NoOp",
"NoOpQuantizer",
"Quantizer",
"SteHeaviside",
"SteSign",
"SteTern",
"SwishSign",
]
def _clipped_gradient(x, dy, clip_value):
"""Calculate `clipped_gradent * dy`."""
if clip_value is None:
return dy
zeros = tf.zeros_like(dy)
mask = tf.math.less_equal(tf.math.abs(x), clip_value)
return tf.where(mask, dy, zeros)
def ste_sign(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
@tf.custom_gradient
def _call(x):
def grad(dy):
return _clipped_gradient(x, dy, clip_value)
return math.sign(x), grad
return _call(x)
def _scaled_sign(x): # pragma: no cover
return 1.3 * ste_sign(x)
@tf.custom_gradient
def approx_sign(x: tf.Tensor) -> tf.Tensor:
def grad(dy):
abs_x = tf.math.abs(x)
zeros = tf.zeros_like(dy)
mask = tf.math.less_equal(abs_x, 1.0)
return tf.where(mask, (1 - abs_x) * 2 * dy, zeros)
return math.sign(x), grad
def swish_sign(x: tf.Tensor, beta: float = 5.0) -> tf.Tensor:
@tf.custom_gradient
def _call(x):
def grad(dy):
b_x = beta * x
return dy * beta * (2 - b_x * tf.tanh(b_x * 0.5)) / (1 + tf.cosh(b_x))
return math.sign(x), grad
return _call(x)
def ste_tern(
x: tf.Tensor,
threshold_value: float = 0.05,
ternary_weight_networks: bool = False,
clip_value: float = 1.0,
) -> tf.Tensor:
@tf.custom_gradient
def _call(x):
if ternary_weight_networks:
threshold = 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)
else:
threshold = threshold_value
def grad(dy):
return _clipped_gradient(x, dy, clip_value)
return tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad
return _call(x)
def ste_heaviside(x: tf.Tensor, clip_value: float = 1.0) -> tf.Tensor:
@tf.custom_gradient
def _call(x):
def grad(dy):
return _clipped_gradient(x, dy, clip_value)
return math.heaviside(x), grad
return _call(x)
class Quantizer(tf.keras.layers.Layer):
"""Common base class for defining quantizers.
# Attributes
precision: An integer defining the precision of the output. This value will be
used by `lq.models.summary()` for improved logging.
"""
precision = None
def compute_output_shape(self, input_shape):
return input_shape
class _BaseQuantizer(Quantizer):
"""Private base class for defining quantizers with Larq metrics."""
def __init__(self, *args, metrics=None, **kwargs):
self._custom_metrics = metrics
super().__init__(*args, **kwargs)
def build(self, input_shape):
if self._custom_metrics and "flip_ratio" in self._custom_metrics:
self.flip_ratio = lq_metrics.FlipRatio(name=f"flip_ratio/{self.name}")
self.flip_ratio.build(input_shape)
super().build(input_shape)
def call(self, inputs):
if hasattr(self, "flip_ratio"):
self.add_metric(self.flip_ratio(inputs))
return inputs
@property
def non_trainable_weights(self):
return []
@utils.register_keras_custom_object
class NoOp(_BaseQuantizer):
r"""Instantiates a serializable no-op quantizer.
\\[
q(x) = x
\\]
!!! warning
This quantizer will not change the input variable. It is only intended to mark
variables with a desired precision that will be recognized by optimizers like
`Bop` and add training metrics to track variable changes.
!!! example
```python
layer = lq.layers.QuantDense(
16, kernel_quantizer=lq.quantizers.NoOp(precision=1),
)
layer.build((32,))
assert layer.kernel.precision == 1
```
# Arguments
        precision: Set the desired precision of the variable. This can be used
            to tag variables with a desired precision that will be recognized by
            optimizers like `Bop`.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
"""
precision = None
def __init__(self, precision: int, **kwargs):
self.precision = precision
super().__init__(**kwargs)
def get_config(self):
return {**super().get_config(), "precision": self.precision}
# `NoOp` used to be called `NoOpQuantizer`; this alias is for
# backwards-compatibility.
NoOpQuantizer = NoOp
@utils.register_alias("ste_sign")
@utils.register_keras_custom_object
class SteSign(_BaseQuantizer):
r"""Instantiates a serializable binary quantizer.
\\[
q(x) = \begin{cases}
-1 & x < 0 \\\
1 & x \geq 0
\end{cases}
\\]
The gradient is estimated using the Straight-Through Estimator
(essentially the binarization is replaced by a clipped identity on the
backward pass).
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
1 & \left|x\right| \leq \texttt{clip_value} \\\
0 & \left|x\right| > \texttt{clip_value}
\end{cases}\\]
```plot-activation
quantizers.SteSign
```
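    !!! example
        ```python
        layer = lq.layers.QuantDense(
            64, kernel_quantizer=lq.quantizers.SteSign(clip_value=1.0)
        )
        ```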
# Arguments
clip_value: Threshold for clipping gradients. If `None` gradients are not
clipped.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# References
- [Binarized Neural Networks: Training Deep Neural Networks with Weights and
Activations Constrained to +1 or -1](https://arxiv.org/abs/1602.02830)
"""
precision = 1
def __init__(self, clip_value: float = 1.0, **kwargs):
self.clip_value = clip_value
super().__init__(**kwargs)
def call(self, inputs):
outputs = ste_sign(inputs, clip_value=self.clip_value)
return super().call(outputs)
def get_config(self):
return {**super().get_config(), "clip_value": self.clip_value}
@utils.register_alias("approx_sign")
@utils.register_keras_custom_object
class ApproxSign(_BaseQuantizer):
r"""Instantiates a serializable binary quantizer.
\\[
q(x) = \begin{cases}
-1 & x < 0 \\\
1 & x \geq 0
\end{cases}
\\]
The gradient is estimated using the ApproxSign method.
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
(2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
0 & \left|x\right| > 1
\end{cases}
\\]
```plot-activation
quantizers.ApproxSign
```
# Arguments
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# References
- [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
Representational Capability and Advanced Training
Algorithm](https://arxiv.org/abs/1808.00278)
"""
precision = 1
def call(self, inputs):
outputs = approx_sign(inputs)
return super().call(outputs)
@utils.register_alias("ste_heaviside")
@utils.register_keras_custom_object
class SteHeaviside(_BaseQuantizer):
r"""
Instantiates a binarization quantizer with output values 0 and 1.
\\[
q(x) = \begin{cases}
+1 & x > 0 \\\
0 & x \leq 0
\end{cases}
\\]
The gradient is estimated using the Straight-Through Estimator
(essentially the binarization is replaced by a clipped identity on the
backward pass).
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
1 & \left|x\right| \leq 1 \\\
0 & \left|x\right| > 1
\end{cases}\\]
```plot-activation
quantizers.SteHeaviside
```
# Arguments
clip_value: Threshold for clipping gradients. If `None` gradients are not
clipped.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# Returns
AND Binarization function
"""
precision = 1
def __init__(self, clip_value: float = 1.0, **kwargs):
self.clip_value = clip_value
super().__init__(**kwargs)
def call(self, inputs):
outputs = ste_heaviside(inputs, clip_value=self.clip_value)
return super().call(outputs)
def get_config(self):
return {**super().get_config(), "clip_value": self.clip_value}
@utils.register_alias("swish_sign")
@utils.register_keras_custom_object
class SwishSign(_BaseQuantizer):
r"""Sign binarization function.
\\[
q(x) = \begin{cases}
-1 & x < 0 \\\
1 & x \geq 0
\end{cases}
\\]
The gradient is estimated using the SignSwish method.
\\[
\frac{\partial q_{\beta}(x)}{\partial x} = \frac{\beta\left\\{2-\beta x \tanh \left(\frac{\beta x}{2}\right)\right\\}}{1+\cosh (\beta x)}
\\]
```plot-activation
quantizers.SwishSign
```
# Arguments
beta: Larger values result in a closer approximation to the derivative of the
sign.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# Returns
SwishSign quantization function
# References
- [BNN+: Improved Binary Network Training](https://arxiv.org/abs/1812.11800)
"""
precision = 1
def __init__(self, beta: float = 5.0, **kwargs):
self.beta = beta
super().__init__(**kwargs)
def call(self, inputs):
outputs = swish_sign(inputs, beta=self.beta)
return super().call(outputs)
def get_config(self):
return {**super().get_config(), "beta": self.beta}
@utils.register_alias("magnitude_aware_sign")
@utils.register_keras_custom_object
class MagnitudeAwareSign(_BaseQuantizer):
r"""Instantiates a serializable magnitude-aware sign quantizer for Bi-Real Net.
A scaled sign function computed according to Section 3.3 in
[Zechun Liu et al](https://arxiv.org/abs/1808.00278).
```plot-activation
quantizers._scaled_sign
```
# Arguments
clip_value: Threshold for clipping gradients. If `None` gradients are not
clipped.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# References
- [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
Representational Capability and Advanced Training
Algorithm](https://arxiv.org/abs/1808.00278)
"""
precision = 1
def __init__(self, clip_value: float = 1.0, **kwargs):
self.clip_value = clip_value
super().__init__(**kwargs)
def call(self, inputs):
scale_factor = tf.stop_gradient(
tf.reduce_mean(tf.abs(inputs), axis=list(range(len(inputs.shape) - 1)))
)
outputs = scale_factor * ste_sign(inputs, clip_value=self.clip_value)
return super().call(outputs)
def get_config(self):
return {**super().get_config(), "clip_value": self.clip_value}
@utils.register_alias("ste_tern")
@utils.register_keras_custom_object
class SteTern(_BaseQuantizer):
r"""Instantiates a serializable ternarization quantizer.
\\[
q(x) = \begin{cases}
+1 & x > \Delta \\\
0 & |x| < \Delta \\\
-1 & x < - \Delta
\end{cases}
\\]
where \\(\Delta\\) is defined as the threshold and can be passed as an argument,
or can be calculated as per the Ternary Weight Networks original paper, such that
\\[
\Delta = \frac{0.7}{n} \sum_{i=1}^{n} |W_i|
\\]
where we assume that \\(W_i\\) is generated from a normal distribution.
The gradient is estimated using the Straight-Through Estimator
    (essentially the ternarization is replaced by a clipped identity on the
backward pass).
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
1 & \left|x\right| \leq \texttt{clip_value} \\\
0 & \left|x\right| > \texttt{clip_value}
\end{cases}\\]
```plot-activation
quantizers.SteTern
```
# Arguments
threshold_value: The value for the threshold, \\(\Delta\\).
ternary_weight_networks: Boolean of whether to use the
Ternary Weight Networks threshold calculation.
clip_value: Threshold for clipping gradients. If `None` gradients are not
clipped.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# References
- [Ternary Weight Networks](https://arxiv.org/abs/1605.04711)
"""
precision = 2
def __init__(
self,
threshold_value: float = 0.05,
ternary_weight_networks: bool = False,
clip_value: float = 1.0,
**kwargs,
):
self.threshold_value = threshold_value
self.ternary_weight_networks = ternary_weight_networks
self.clip_value = clip_value
super().__init__(**kwargs)
def call(self, inputs):
outputs = ste_tern(
inputs,
threshold_value=self.threshold_value,
ternary_weight_networks=self.ternary_weight_networks,
clip_value=self.clip_value,
)
return super().call(outputs)
def get_config(self):
return {
**super().get_config(),
"threshold_value": self.threshold_value,
"ternary_weight_networks": self.ternary_weight_networks,
"clip_value": self.clip_value,
}
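# A worked sketch of the default fixed-threshold behaviour documented
# above: weights with magnitude below `threshold_value` (0.05) snap to 0.
# Illustrative only, not part of the public larq API.
def _ste_tern_example():
    w = tf.constant([-1.0, -0.02, 0.03, 1.0])
    return SteTern()(w)  # expected: [-1., 0., 0., 1.]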
@utils.register_alias("dorefa_quantizer")
@utils.register_keras_custom_object
class DoReFa(_BaseQuantizer):
r"""Instantiates a serializable k_bit quantizer as in the DoReFa paper.
\\[
q(x) = \begin{cases}
0 & x < \frac{1}{2n} \\\
\frac{i}{n} & \frac{2i-1}{2n} < x < \frac{2i+1}{2n} \text{ for } i \in \\{1,n-1\\}\\\
1 & \frac{2n-1}{2n} < x
\end{cases}
\\]
where \\(n = 2^{\text{k_bit}} - 1\\). The number of bits, k_bit, needs to be passed
as an argument.
The gradient is estimated using the Straight-Through Estimator
(essentially the binarization is replaced by a clipped identity on the
backward pass).
\\[\frac{\partial q(x)}{\partial x} = \begin{cases}
1 & 0 \leq x \leq 1 \\\
0 & \text{else}
\end{cases}\\]
The behavior for quantizing weights should be different in comparison to
the quantization of activations:
    instead of limiting input operands (in this case: weights) using a hard
    limiter, a hyperbolic tangent is applied to achieve a softer limiting
    whose gradient is itself continuously differentiable.
\\[
w_{lim}(w) = \tanh(w)
\\]
Furthermore, the weights of each layer are normed, such that the weight with
the largest magnitude gets the largest or smallest (depending on its sign)
quantizable value. That way, the full quantizable numeric range is utilized.
\\[
w_{norm}(w) = \frac{w}{\max(|w|)}
\\]
    The formulas can be found in Section 2.3 of the paper. Please note that
    the paper refers to weights being quantized on a numeric range of [-1, 1], while
activations are quantized on the numeric range [0, 1]. This implementation
uses the same ranges as specified in the paper.
    The activation quantizer defines the function quantize_k() from the paper with
    the correct numeric range of [0, 1]. The weight quantization mode adds
    pre- and post-processing for numeric range adaptation, soft limiting and
    norming. The full quantization function, including the adaptation of numeric ranges, is
\\[
q(w) = 2 \, quantize_{k}(\frac{w_{norm}\left(w_{lim}\left(w\right)\right)}{2} + \frac{1}{2}) - 1
\\]
!!! warning
The weight mode works for weights on the range [-1, 1], which matches the
default setting of `constraints.weight_clip`. Do not use this quantizer
with a different constraint `clip_value` than the default one.
__`mode == "activations"`__
```plot-activation
quantizers.DoReFa
```
__`mode == "weights"`__
```plot-activation
quantizers.DoReFa(mode='weights')
```
# Arguments
k_bit: number of bits for the quantization.
mode: `"activations"` for clipping inputs on [0, 1] range or `"weights"` for
soft-clipping and norming weights on [-1, 1] range before applying
quantization.
metrics: An array of metrics to add to the layer. If `None` the metrics set in
`larq.context.metrics_scope` are used. Currently only the `flip_ratio`
metric is available.
# Returns
Quantization function
# Raises
ValueError for bad value of `mode`.
# References
- [DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low
Bitwidth Gradients](https://arxiv.org/abs/1606.06160)
"""
precision = None
def __init__(self, k_bit: int = 2, mode: str = "activations", **kwargs):
self.precision = k_bit
if mode not in ("activations", "weights"):
raise ValueError(
f"Invalid DoReFa quantizer mode {mode}. "
"Valid values are 'activations' and 'weights'."
)
self.mode = mode
super().__init__(**kwargs)
def weight_preprocess(self, inputs):
# Limit inputs to [-1, 1] range
limited = tf.math.tanh(inputs)
# Divider for max-value norm.
dividend = tf.math.reduce_max(tf.math.abs(limited))
# Need to stop the gradient here. Otherwise, for the maximum element,
# which gives the dividend, normed is limited/limited (for this one
        # maximum element). The derivative of y = x/x, dy/dx, is just zero when
# one does the simplification y = x/x = 1. But TF does NOT do this
# simplification when computing the gradient for the
# normed = limited/dividend operation. As a result, this gradient
# becomes complicated, because during the computation, "dividend" is
# not just a constant, but depends on "limited" instead. Here,
# tf.stop_gradient is used to mark "dividend" as a constant explicitly.
dividend = tf.stop_gradient(dividend)
# Norm and then scale from value range [-1,1] to [0,1] (the range
# expected by the core quantization operation).
# If the dividend used for the norm operation is 0, all elements of
# the weight tensor are 0 and divide_no_nan returns 0 for all weights.
# So if all elements of the weight tensor are zero, nothing is normed.
return tf.math.divide_no_nan(limited, 2.0 * dividend) + 0.5
def call(self, inputs):
        # Depending on the quantizer mode (activation or weight), either clip
        # inputs to the [0, 1] range or apply the weight preprocessing method.
if self.mode == "activations":
inputs = tf.clip_by_value(inputs, 0.0, 1.0)
elif self.mode == "weights":
inputs = self.weight_preprocess(inputs)
else:
raise ValueError(
f"Invalid DoReFa quantizer mode {self.mode}. "
"Valid values are 'activations' and 'weights'."
)
@tf.custom_gradient
def _k_bit_with_identity_grad(x):
n = 2**self.precision - 1
return tf.round(x * n) / n, lambda dy: dy
outputs = _k_bit_with_identity_grad(inputs)
# Scale weights from [0, 1] quantization range back to [-1,1] range
if self.mode == "weights":
outputs = 2.0 * outputs - 1.0
return super().call(outputs)
def get_config(self):
return {**super().get_config(), "k_bit": self.precision, "mode": self.mode}
# `DoReFa` used to be called `DoReFaQuantizer`; this alias is for
# backwards-compatibility.
DoReFaQuantizer = DoReFa
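# A worked sketch of the activation grid: with k_bit=2 the quantizer rounds
# onto n = 2**2 - 1 = 3 steps, i.e. the values {0, 1/3, 2/3, 1}, following
# `tf.round(x * n) / n` in `call` above (note tf.round rounds halves to
# even, so 0.5 * 3 = 1.5 maps to 2/3). Illustrative only.
def _dorefa_example():
    x = tf.constant([0.0, 0.2, 0.5, 0.9])
    return DoReFa(k_bit=2)(x)  # expected: [0., 0.3333, 0.6667, 1.]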
QuantizerType = Union[Quantizer, Callable[[tf.Tensor], tf.Tensor]]
def serialize(quantizer: tf.keras.layers.Layer, use_legacy_format=False):
if use_legacy_format and version.parse(tf.__version__) >= version.parse("2.13"):
return tf.keras.utils.legacy.serialize_keras_object(quantizer)
return tf.keras.utils.serialize_keras_object(quantizer)
def deserialize(name, custom_objects=None, use_legacy_format=False):
if use_legacy_format and version.parse(tf.__version__) >= version.parse("2.13"):
return tf.keras.utils.legacy.deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="quantization function",
)
return tf.keras.utils.deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="quantization function",
)
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
use_legacy_format = "module" not in identifier
return deserialize(identifier, use_legacy_format=use_legacy_format)
if isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
return get(config)
if callable(identifier):
return identifier
raise ValueError(
f"Could not interpret quantization function identifier: {identifier}"
)
def get_kernel_quantizer(identifier):
"""Returns a quantizer from identifier and adds default kernel quantizer metrics.
# Arguments
identifier: Function or string
# Returns
`Quantizer` or `None`
"""
quantizer = get(identifier)
if isinstance(quantizer, _BaseQuantizer) and not quantizer._custom_metrics:
quantizer._custom_metrics = list(context.get_training_metrics())
return quantizer
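# A minimal sketch of identifier resolution: thanks to the alias registry
# above, string names, config dicts and instances are interchangeable when
# passed to layers. Illustrative only.
def _get_example():
    assert isinstance(get("swish_sign"), SwishSign)
    assert get(None) is None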
| 23,775 | 30.449735 | 141 | py |
larq | larq-main/larq/context.py | """Context managers that configure global behaviour of Larq."""
import contextlib
import threading
__all__ = [
"metrics_scope",
"quantized_scope",
"get_training_metrics",
"should_quantize",
]
_quantized_scope = threading.local()
_quantized_scope.should_quantize = False
@contextlib.contextmanager
def quantized_scope(quantize):
"""A context manager to define the behaviour of `QuantizedVariable`.
!!! example
```python
model.save("full_precision_model.h5") # save full precision latent weights
fp_weights = model.get_weights() # get latent weights
with larq.context.quantized_scope(True):
model.save("binary_model.h5") # save binarized weights
weights = model.get_weights() # get binarized weights
```
# Arguments
        quantize: If `True`, `QuantizedVariable`s will return their quantized
            values in the forward pass. If `False`, they will act as latent
            variables.
"""
backup = should_quantize()
_quantized_scope.should_quantize = quantize
yield quantize
_quantized_scope.should_quantize = backup
def should_quantize():
"""Returns the current quantized scope."""
return getattr(_quantized_scope, "should_quantize", False)
_global_training_metrics = set()
_available_metrics = {"flip_ratio"}
@contextlib.contextmanager
def metrics_scope(metrics=[]):
"""A context manager to set the training metrics to be used in quantizers.
!!! example
```python
with larq.context.metrics_scope(["flip_ratio"]):
model = tf.keras.models.Sequential(
[larq.layers.QuantDense(3, kernel_quantizer="ste_sign", input_shape=(32,))]
)
model.compile(loss="mse", optimizer="sgd")
```
# Arguments
metrics: Iterable of metrics to add to quantizers defined inside this context.
Currently only the `flip_ratio` metric is available.
"""
for metric in metrics:
if metric not in _available_metrics:
raise ValueError(
f"Unknown training metric '{metric}'. Available metrics: {_available_metrics}."
)
backup = _global_training_metrics.copy()
_global_training_metrics.update(metrics)
yield _global_training_metrics
_global_training_metrics.clear()
_global_training_metrics.update(backup)
def get_training_metrics():
"""Retrieves a live reference to the training metrics in the current scope.
Updating and clearing training metrics using `larq.context.metrics_scope` is
preferred, but `get_training_metrics` can be used to directly access them.
!!! example
```python
get_training_metrics().clear()
get_training_metrics().add("flip_ratio")
```
# Returns
A set of training metrics in the current scope.
"""
return _global_training_metrics
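# A minimal sketch of the scope mechanics documented above: the flag is
# only True inside the context manager and is restored on exit.
# Illustrative only, not part of the public larq API.
def _quantized_scope_example():
    assert should_quantize() is False
    with quantized_scope(True):
        assert should_quantize() is True
    assert should_quantize() is False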
| 2,953 | 29.453608 | 95 | py |
larq | larq-main/larq/conftest.py | import pytest
import tensorflow as tf
from packaging import version
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from larq import context as lq_context
if version.parse(tf.__version__) >= version.parse("1.15"):
strategy_combinations.set_virtual_cpus_to_at_least(3)
distributed_devices = ["/cpu:1", "/cpu:2"]
else:
distributed_devices = ["/cpu:0"]
@pytest.fixture
def eager_mode():
"""pytest fixture for running test in eager mode"""
with context.eager_mode():
yield
@pytest.fixture
def graph_mode():
"""pytest fixture for running test in graph mode"""
with context.graph_mode():
with tf.compat.v1.Session().as_default():
yield
tf.keras.backend.clear_session()
@pytest.fixture(params=["eager", "graph"])
def eager_and_graph_mode(request):
"""pytest fixture for running test in eager and graph mode"""
if request.param == "graph":
with context.graph_mode():
with tf.compat.v1.Session().as_default():
yield request.param
tf.keras.backend.clear_session()
else:
with context.eager_mode():
yield request.param
@pytest.fixture(params=["graph", "tf_eager", "tf_keras_eager"])
def keras_should_run_eagerly(request):
"""Fixture to run in graph and two eager modes.
The modes are:
- Graph mode
- TensorFlow eager and Keras eager
- TensorFlow eager and Keras not eager
The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
should run eagerly.
"""
if request.param == "graph":
if version.parse(tf.__version__) >= version.parse("2"):
pytest.skip("Skipping graph mode for TensorFlow 2+.")
with context.graph_mode():
yield
else:
with context.eager_mode():
yield request.param == "tf_keras_eager"
@pytest.fixture(params=[False, True])
def distribute_scope(request):
if request.param is True:
with tf.distribute.MirroredStrategy(distributed_devices).scope():
yield request.param
else:
yield request.param
@pytest.fixture(params=[True, False])
def quantized(request):
"""pytest fixture for running test quantized and non-quantized"""
with lq_context.quantized_scope(request.param):
yield request.param
@pytest.fixture(params=["channels_last", "channels_first"])
def data_format(request):
return request.param
| 2,508 | 27.511364 | 85 | py |
larq | larq-main/larq/version_test.py | import larq
def test_version():
assert hasattr(larq, "__version__") and "." in larq.__version__
| 102 | 16.166667 | 67 | py |
larq | larq-main/larq/context_test.py | import pytest
from larq import context
def test_scope():
assert context.get_training_metrics() == set()
with context.metrics_scope(["flip_ratio"]):
assert context.get_training_metrics() == {"flip_ratio"}
assert context.get_training_metrics() == set()
with pytest.raises(ValueError, match=r".*unknown_metric.*"):
with context.metrics_scope(["flip_ratio", "unknown_metric"]):
pass
| 426 | 29.5 | 69 | py |
larq | larq-main/larq/math.py | """Math operations that are specific to extremely quantized networks."""
import tensorflow as tf
def sign(x):
r"""A sign function that will never be zero
\\[
f(x) = \begin{cases}
-1 & x < 0 \\\
\hphantom{-}1 & x \geq 0
\end{cases}
\\]
This function is similar to
[`tf.math.sign`](https://www.tensorflow.org/api_docs/python/tf/math/sign) but will
return a binary value and will never be zero.
# Arguments
`x`: Input Tensor
# Returns
A Tensor with same type as `x`.
"""
return tf.sign(tf.sign(x) + 0.1)
def heaviside(x):
r"""Heaviside step function with output values 0 and 1.
\\[
q(x) = \begin{cases}
+1 & x > 0 \\\
\hphantom{+}0 & x \leq 0
\end{cases}
\\]
# Arguments
`x`: Input Tensor
# Returns
A Tensor with same type as `x`.
"""
return tf.sign(tf.nn.relu(x))
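# A minimal sketch contrasting the two functions above with tf.math.sign;
# the zero handling is the whole point of the +0.1 shift in `sign`.
# Illustrative only.
def _sign_vs_tf_sign_example():
    x = tf.constant([-2.0, 0.0, 2.0])
    return sign(x), tf.sign(x)  # expected: [-1., 1., 1.] vs. [-1., 0., 1.]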
| 909 | 19.222222 | 86 | py |
larq | larq-main/larq/testing_utils.py | import numpy as np
import tensorflow as tf
import larq as lq
def _eval_tensor(tensor):
if tensor is None:
return None
elif callable(tensor):
return _eval_helper(tensor())
else:
return tensor.numpy()
def _eval_helper(tensors):
if tensors is None:
return None
return tf.nest.map_structure(_eval_tensor, tensors)
def evaluate(tensors):
if tf.executing_eagerly():
return _eval_helper(tensors)
else:
sess = tf.compat.v1.get_default_session()
return sess.run(tensors)
def generate_real_values_with_zeros(low=-2, high=2, shape=(4, 10)):
real_values = np.random.uniform(low, high, shape)
real_values = np.insert(real_values, 1, 0, axis=1)
return real_values
def get_small_bnn_model(input_dim, num_hidden, output_dim, trainable_bn=True):
model = tf.keras.models.Sequential()
model.add(
lq.layers.QuantDense(
units=num_hidden,
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
activation="relu",
input_shape=(input_dim,),
use_bias=False,
)
)
model.add(tf.keras.layers.BatchNormalization(trainable=trainable_bn))
model.add(
lq.layers.QuantDense(
units=output_dim,
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
input_quantizer="ste_sign",
activation="softmax",
use_bias=False,
)
)
return model
def random_input(shape):
for i, dim in enumerate(shape):
if dim is None:
shape[i] = np.random.randint(1, 4)
data = 10 * np.random.random(shape) - 0.5
return data.astype("float32")
# This is a fork of https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/testing_utils.py#L72
# as recommended in https://github.com/tensorflow/tensorflow/issues/28601#issuecomment-492810252
def layer_test(
layer_cls,
kwargs=None,
input_shape=None,
input_dtype=None,
input_data=None,
expected_output=None,
expected_output_dtype=None,
should_run_eagerly=False,
):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
expected_output: Shape tuple for the expected shape of the output.
expected_output_dtype: Data type expected for the output.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError("input_shape is None")
if not input_dtype:
input_dtype = "float32"
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == "float":
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
    # test get_weights, set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
# test in functional API
x = tf.keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if tf.keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError(
"When testing layer %s, for input %s, found output "
"dtype=%s but expected to find %s.\nFull kwargs: %s"
% (
layer_cls.__name__,
x,
tf.keras.backend.dtype(y),
expected_output_dtype,
kwargs,
)
)
# check shape inference
model = tf.keras.models.Model(x, y)
expected_output_shape = tuple(
layer.compute_output_shape(tf.TensorShape(input_shape)).as_list()
)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape, actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
"When testing layer %s, for input %s, found output_shape="
"%s but expected to find %s.\nFull kwargs: %s"
% (
layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs,
)
)
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = tf.keras.models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=2e-3)
# Recreate layer to prevent layer metrics from being configured multiple times.
layer = layer_cls(**kwargs)
# test training mode (e.g. useful for dropout tests)
# Rebuild the model to avoid the graph being reused between predict() and
    # train(). This was causing errors for layers with a Defun as their body.
# See b/120160788 for more details. This should be mitigated after 2.0.
model = tf.keras.models.Model(x, layer(x))
model.compile(
"rmsprop",
"mse",
weighted_metrics=["acc"],
run_eagerly=should_run_eagerly,
)
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config["batch_input_shape"] = input_shape
layer = layer.__class__.from_config(layer_config)
model = tf.keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape, actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
"When testing layer %s **after deserialization**, "
"for input %s, found output_shape="
"%s but expected to find inferred shape %s.\nFull kwargs: %s"
% (
layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs,
)
)
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = tf.keras.models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=2e-3)
# for further checks in the caller function
return actual_output
| 7,954 | 34.044053 | 117 | py |
larq | larq-main/larq/quantized_variable.py | """Contains QuantizedVariable, a variable that can be quantized in the forward pass."""
from typing import Optional
import tensorflow as tf
from packaging import version
from tensorflow.python.distribute.values import DistributedVariable
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from larq import context
from larq.quantizers import QuantizerType
# pytype: disable=import-error
try:
from tensorflow.python.distribute.ps_values import AggregatingVariable
from tensorflow.python.types.core import Tensor as TensorType
except ModuleNotFoundError:
TensorType = object
from tensorflow.python.distribute.values import AggregatingVariable
# pytype: enable=import-error
UNSPECIFIED = object()
_SUPPORTS_TRACE_TYPE = version.parse(tf.__version__) >= version.parse("2.8")
if _SUPPORTS_TRACE_TYPE:
try:
from tensorflow.types.experimental import TraceType
except ImportError:
from tensorflow.python.types.trace import TraceType
class QuantizedVariableSpec(TraceType):
"""TraceType for QuantizedVariableSpec for tracing with tf.function.
This class implements the Type for QuantizedVariable used in tracing.
"""
def __init__(self, value):
self.latent_variable = value
def is_subtype_of(self, other) -> bool:
"""If the other spec is the same as `self`, return True."""
return self == other
def most_specific_common_supertype(self, others):
"""`self` is the common supertype if all input types match it."""
return self if all(self == other for other in others) else None
def placeholder_value(self, placeholder_context=None):
"""Use the QuantizedVariable value itself as a placeholder."""
return self.latent_variable
def _cast(self, value, _):
return value
def _to_tensors(self, value):
return []
def __hash__(self) -> int:
return hash(id(self.latent_variable))
def __eq__(self, other) -> bool:
return self is other
class QuantizedVariable(tf.Variable, TensorType):
"""A Variable that can be quantized in the forward pass in applicable contexts."""
def __init__(
self,
variable: tf.Variable,
quantizer: Optional[QuantizerType] = None,
precision: Optional[int] = None,
op: Optional[tf.Operation] = UNSPECIFIED,
):
"""Creates an QuantizedVariable instance.
# Arguments
variable: A floating-point resource variable to wrap.
quantizer: An optional quantizer to transform the floating-point
variable to a fake quantized variable.
precision: An optional integer defining the precision of the quantized
variable. If `None`, `quantizer.precision` is used.
op: An optional operation of this variable.
"""
if not resource_variable_ops.is_resource_variable(variable):
raise ValueError(
"`variable` must be of type `tf.ResourceVariable`, "
f"but got `{type(variable)}`."
)
if not (quantizer is None or callable(quantizer)):
raise ValueError(
"`quantizer` must be `callable` or `None`, "
f"but got `{type(quantizer)}`."
)
if not (precision is None or type(precision) == int):
raise ValueError(
"`precision` must be of type `int` or `None`, "
f"but got `{type(precision)}`."
)
self.latent_variable = variable
self.quantizer = quantizer
self.precision = precision or getattr(quantizer, "precision", None)
self._op = op
@classmethod
def from_variable(
cls,
variable: tf.Variable,
quantizer: Optional[QuantizerType] = None,
precision: Optional[int] = None,
op: Optional[tf.Operation] = UNSPECIFIED,
):
"""Creates a QuantizedVariable that wraps another variable.
This typically just returns `QuantizedVariable(variable)`. But, if the variable
is a DistributedVariable or one of its subclasses, we instead dynamically
create a class that subclasses from both QuantizedVariable and
variable.__class__. This is so the returned variable will still pass
`isinstance(variable, variable.__class__)`, which is required for
DistributedVariables and its subclasses to work properly.
# Arguments
variable: A floating-point resource variable to wrap.
quantizer: An optional quantizer to transform the floating-point variable to
a fake quantized variable.
precision: An optional integer defining the precision of the quantized
variable. If `None`, `quantizer.precision` is used.
op: An optional operation of this variable.
# Returns
A QuantizedVariable that wraps the variable.
"""
if not isinstance(variable, (DistributedVariable, AggregatingVariable)):
return cls(variable, quantizer, precision, op=op)
class QuantizedDistributedVariable(cls, variable.__class__):
"""A QuantizedVariable that also subclasses from `variable.__class__`.
`variable.__class__` is either a `DistributedVariable` or an
`AggregatingVariable`.
"""
def get(self, *args, **kwargs):
                # For some reason this is needed to make the unit test `x + x` pass on TF 1.14
return self._quantize(self.latent_variable.get(*args, **kwargs))
return QuantizedDistributedVariable(variable, quantizer, precision, op=op)
def _quantize(self, value):
if self.quantizer and context.should_quantize():
return self.quantizer(value)
return value
def value(self):
return self._quantize(self.latent_variable.value())
def read_value(self):
return self._quantize(self.latent_variable.read_value())
def numpy(self):
return self._quantize(self.latent_variable).numpy()
def sparse_read(self, *args, **kwargs):
return self._quantize(self.latent_variable.sparse_read(*args, **kwargs))
def gather_nd(self, *args, **kwargs):
return self._quantize(self.latent_variable.gather_nd(*args, **kwargs))
def __getattr__(self, name):
return getattr(self.latent_variable, name)
def _dense_var_to_tensor(self, *args, **kwargs):
return self._quantize(
self.latent_variable._dense_var_to_tensor(*args, **kwargs)
)
def eval(self, session=None):
return self._quantize(self.latent_variable).eval(session=session)
def initialized_value(self):
return self._quantize(self.latent_variable.initialized_value())
@property
def initial_value(self):
return self._quantize(self.latent_variable.initial_value)
def __tf_tensor__(
self, dtype: Optional[tf.dtypes.DType] = None, name: Optional[str] = None
) -> tf.Tensor:
return self._dense_var_to_tensor(dtype=dtype, name=name)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
@staticmethod
def _get_name(obj) -> str:
try:
return obj.__name__
except AttributeError:
return obj.__class__.__name__
def __repr__(self) -> str:
repr_ = (
f"<{self.__class__.__name__} '{self.name}' "
f"shape={self.shape} dtype={self.dtype.name}"
)
if self.quantizer is not None:
repr_ += f" quantizer={self._get_name(self.quantizer)}"
if self.precision is not None:
repr_ += f" precision={self.precision}"
if tf.executing_eagerly() and not self._in_graph_mode:
return f"{repr_} numpy={ops.numpy_text(self.read_value(), is_repr=True)}>"
return f"{repr_}>"
# Method delegations: We delegate the following methods to self.latent_variable.
# Each of these methods simply calls the same method on self.latent_variable. The
# base Variable raises NotImplementedError for most of these, so we must
# override them.
#
# We do not define the following methods from Variable for the following
# reasons:
# * 'ref': Instead we inherit the definition from Variable.
# If we defined and delegated to Variable, the ref of an QuantizedVariable
# would be the same as the ref of the underlying variable, which would be
# strange as they are different Python objects.
def set_shape(self, *args, **kwargs):
return self.latent_variable.set_shape(*args, **kwargs)
@property
def trainable(self):
return self.latent_variable.trainable
@property
def synchronization(self):
return self.latent_variable.synchronization
@property
def aggregation(self):
return self.latent_variable.aggregation
@property
def constraint(self):
return self.latent_variable.constraint
def _apply_assign_update(
self, update_fn, value, use_locking=None, name=None, read_value=True
):
if ops.executing_eagerly_outside_functions():
assign_op = update_fn(value, use_locking, name, False)
if read_value:
return QuantizedVariable.from_variable(
self.latent_variable, self.quantizer, self.precision, op=assign_op
)
return assign_op
# Fallback to wrapping the returned variable in graph mode if possible
assign_var = update_fn(value, use_locking, name, read_value)
if read_value and resource_variable_ops.is_resource_variable(assign_var):
return QuantizedVariable.from_variable(
assign_var, self.quantizer, self.precision
)
return assign_var
def _apply_update(self, update_fn, *args, **kwargs):
update_var = update_fn(*args, **kwargs)
if ops.executing_eagerly_outside_functions():
return self
# Fallback to wrapping the returned variable in graph mode if possible
if resource_variable_ops.is_resource_variable(update_var):
return QuantizedVariable.from_variable(
update_var, self.quantizer, self.precision
)
return update_var
def assign(self, value, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(
self.latent_variable.assign, value, use_locking, name, read_value
)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(
self.latent_variable.assign_add, delta, use_locking, name, read_value
)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
return self._apply_assign_update(
self.latent_variable.assign_sub, delta, use_locking, name, read_value
)
def scatter_sub(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_sub, *args, **kwargs)
def scatter_add(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_add, *args, **kwargs)
def scatter_max(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_max, *args, **kwargs)
def scatter_min(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_min, *args, **kwargs)
def scatter_mul(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_mul, *args, **kwargs)
def scatter_div(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_div, *args, **kwargs)
def scatter_update(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_update, *args, **kwargs)
def batch_scatter_update(self, *args, **kwargs):
return self._apply_update(
self.latent_variable.batch_scatter_update, *args, **kwargs
)
def scatter_nd_sub(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_nd_sub, *args, **kwargs)
def scatter_nd_add(self, *args, **kwargs):
return self._apply_update(self.latent_variable.scatter_nd_add, *args, **kwargs)
def scatter_nd_update(self, *args, **kwargs):
return self._apply_update(
self.latent_variable.scatter_nd_update, *args, **kwargs
)
def count_up_to(self, *args, **kwargs):
return self.latent_variable.count_up_to(*args, **kwargs)
def load(self, *args, **kwargs):
return self.latent_variable.load(*args, **kwargs)
@property
def dtype(self):
return self.latent_variable.dtype
@property
def name(self):
return self.latent_variable.name
@property
def _shared_name(self):
return self.latent_variable._shared_name
@property
def initializer(self):
return self.latent_variable.initializer
@property
def device(self):
return self.latent_variable.device
@property
def op(self):
if self._op is not UNSPECIFIED:
return self._op
return self.latent_variable.op
@property
def graph(self):
return self.latent_variable.graph
@property
def shape(self):
return self.latent_variable.shape
def get_shape(self):
return self.latent_variable.get_shape()
def __tf_tracing_type__(self, context):
if _SUPPORTS_TRACE_TYPE:
return QuantizedVariableSpec(self)
return NotImplemented
def _gather_saveables_for_checkpoint(self):
# By delegating this method to the wrapped variable, checkpoints with
# QuantizedVariables are identical to checkpoints with normal variables.
# Therefore models checkpointed with QuantizedVariables can be restored on
# models with normal variables, and vice versa.
return self.latent_variable._gather_saveables_for_checkpoint()
def _map_resources(self, *args):
# By delegating this method to the wrapped variable, SavedModel with
# QuantizedVariables are identical to SavedModel with normal variables.
obj_map, resource_map = self.latent_variable._map_resources(*args)
obj_map[self] = obj_map[self.latent_variable]
return obj_map, resource_map
def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):
# By delegating this method to the wrapped variable, SavedModel with
# QuantizedVariables are identical to SavedModel with normal variables.
resource_list = self.latent_variable._export_to_saved_model_graph(
object_map, tensor_map, options, **kwargs
)
object_map[self] = object_map[self.latent_variable]
return resource_list
# TODO: Maybe encode the fact the variable is an QuantizedVariable in to_proto().
def to_proto(self, *args, **kwargs):
return self.latent_variable.to_proto(*args, **kwargs)
def from_proto(self, *args, **kwargs):
return self.latent_variable.from_proto(*args, **kwargs)
# Delegate the private attributes _handle_name and _initializer_op to
# self.latent_variable. SavedModel sets these attributes when loading a model. For
# example, it sets _handle_name here:
# https://github.com/tensorflow/tensorflow/blob/db26bd574fa95b5bdd53c08463dd19407cc0297e/tensorflow/python/keras/saving/saved_model/load.py#L211
# We need to expose these attributes on AutoCastVariable as well for
# SavedModel to work properly.
# TODO: Find a better way to support SavedModel. Exposing private attributes is
# hacky and difficult to maintain.
# For more info see https://github.com/tensorflow/tensorflow/commit/1fcda57f37c2ac854cabf1c3462eb14e39d36c60
@property
def _handle_name(self):
return self.latent_variable._handle_name
@_handle_name.setter
def _handle_name(self, handle_name):
self.latent_variable._handle_name = handle_name
@property
def _initializer_op(self):
return self.latent_variable._initializer_op
@_initializer_op.setter
def _initializer_op(self, initializer_op):
self.latent_variable._initializer_op = initializer_op
def _as_graph_element(self):
if self.quantizer and context.should_quantize():
return self.quantizer(self.latent_variable)
graph_element = self.latent_variable._as_graph_element()
if graph_element is None:
return self._op
return graph_element
QuantizedVariable._OverloadAllOperators()
tf.register_tensor_conversion_function(
QuantizedVariable, QuantizedVariable._dense_var_to_tensor
)
try:
ops.register_dense_tensor_like_type(QuantizedVariable)
except AttributeError:
pass
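# A minimal usage sketch: wrapping a resource variable so that reads inside
# `larq.context.quantized_scope(True)` return the quantized value, while
# reads outside the scope return the latent value. `tf.sign` is an
# arbitrary stand-in quantizer chosen for illustration.
def _quantized_variable_example():
    latent = tf.Variable([0.4, -0.7])
    qvar = QuantizedVariable.from_variable(latent, quantizer=tf.sign)
    with context.quantized_scope(True):
        return qvar.read_value()  # expected: [1., -1.]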
| 17,061 | 36.915556 | 148 | py |
larq | larq-main/larq/optimizers.py | """Neural networks with extremely low-precision weights and activations, such as
Binarized Neural Networks (BNNs), usually contain a mix of low-precision weights (e.g.
1-bit) and higher-precision weights (e.g. 8-bit, 16-bit, or 32-bit). Examples of this
include the first and last layers of image classification models, which have
higher-precision weights in most BNN architectures from the literature.
Training a BNN, then, consists of optimizing both low-precision and higher-precision
weights. In `larq`, we provide a mechanism to target different bit-precision variables
with different optimizers using the `CaseOptimizer` class. Modeled after the
[`tf.case`](https://www.tensorflow.org/api_docs/python/tf/case) signature,
`CaseOptimizer` accepts pairs of predicates and optimizers. A predicate, given a
variable, decides whether its optimizer should train that variable.
A `CaseOptimizer` behaves much like any other
[Keras optimizer](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers), and
once you instantiate it you can pass it to your `model.compile()` as usual. To
instantiate a `CaseOptimizer`, pass one or a list of `(predicate, optimizer)` tuples,
along with a `default` optimizer which trains any variables not claimed by another
optimizer. A variable may not be claimed by more than one optimizer's predicate.
!!! example
```python
no_op_quantizer = lq.quantizers.NoOp(precision=1)
layer = lq.layers.QuantDense(16, kernel_quantizer=no_op_quantizer)
case_optimizer = lq.optimizers.CaseOptimizer(
(
lq.optimizers.Bop.is_binary_variable, # predicate
lq.optimizers.Bop(threshold=1e-6, gamma=1e-3), # optimizer
),
default_optimizer=tf.keras.optimizers.Adam(0.01),
)
```
"""
import warnings
from copy import deepcopy
from typing import Callable, Optional, Tuple
import tensorflow as tf
from packaging import version
import larq as lq
from larq import utils
__all__ = ["Bop", "CaseOptimizer"]
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.keras.optimizers.legacy import Optimizer # type: ignore
else:
from tensorflow.keras.optimizers import Optimizer # type: ignore
# From https://github.com/keras-team/keras/blob/a8606fd45b760cce3e65727e9d62cae796c45930/keras/optimizer_v2/optimizer_v2.py#L1430-L1450
def _var_key(var):
"""Key for representing a primary variable, for looking up slots.
In graph mode the name is derived from the var shared name.
In eager mode the name is derived from the var unique id.
If distribution strategy exists, get the primary variable first.
Args:
var: the variable.
Returns:
the unique name of the variable.
"""
# Get the distributed variable if it exists.
if hasattr(var, "_distributed_container"):
var = var._distributed_container()
if var._in_graph_mode:
return var._shared_name
return var._unique_id
@utils.register_keras_custom_object
class CaseOptimizer(Optimizer):
"""An optmizer wrapper that applies different optimizers to a subset of variables.
An optimizer is used to train a variable iff its accompanying predicate evaluates to
`True`.
For each variable, at most one optimizer's predicate may evaluate to `True`. If no
optimizer's predicate evaluates to `True` for a variable, it is trained with the
`default_optimizer`. If a variable is claimed by no optimizers and
`default_optimizer == None`, the variable is not trained.
# Arguments
predicate_optimizer_pairs: One or more `(pred, tf.keras.optimizers.legacy.Optimizer)`
pairs, where `pred` takes one `tf.Variable` as argument and returns `True`
if the optimizer should be used for that variable, e.g. `pred(var) == True`.
default_optimizer: A `tf.keras.optimizers.legacy.Optimizer` to be applied to any
variable not claimed by any other optimizer. (Must be passed as keyword
argument.)
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
*predicate_optimizer_pairs: Tuple[Callable[[tf.Variable], bool], Optimizer],
default_optimizer: Optional[Optimizer] = None,
name: str = "optimizer_case",
):
super().__init__(name=name)
# Type checks for (predicate, optimizer) pairs
for i, (predicate, optimizer) in enumerate(predicate_optimizer_pairs):
if not callable(predicate):
raise TypeError(
f"Expected callable predicate at `predicate_optimizer_pairs[{i}][0]` but got `{type(predicate)}`."
)
if not isinstance(optimizer, Optimizer):
raise TypeError(
f"Expected `tf.keras.optimizers.legacy.Optimizer` at `predicate_optimizer_pairs[{i}][1]` but got `{type(optimizer)}`."
)
# Type check for default optimizers
if default_optimizer is not None and not isinstance(
default_optimizer, Optimizer
):
raise TypeError(
f"Expected `Optimizer` for `default_optimizer` but got `{type(default_optimizer)}`."
)
self.pred_opt_pairs = predicate_optimizer_pairs
self.default = default_optimizer
self.var_opt_mapping = None
# List of optimizers ending in `default_optimizer`, for easier internal access
self.optimizers = [opt for (_, opt) in self.pred_opt_pairs]
if self.default:
self.optimizers.append(self.default)
self.DEFAULT_OPT_INDEX = len(self.pred_opt_pairs)
# Track optimizers to support reloading via tf.train.Checkpoint
for i, optimizer in enumerate(self.optimizers):
self._track_trackable(optimizer, name=f"optimizer_{i}")
@property
def weights(self):
weights = []
for optimizer in self.optimizers:
weights.extend(optimizer.weights)
return weights
@Optimizer.iterations.setter
def iterations(self, variable):
        raise NotImplementedError("CaseOptimizer does not support setting iterations.")
def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
"""Apply gradients to variables for each optimizer.
On the first call to `apply_gradients()`, compute the mapping from variables to
optimizers and cache it in the `self.var_opt_mapping` dict for serialization and
faster access.
"""
if self.var_opt_mapping is None:
# Convert `grads_and_vars` to list so we can iterate multiple times over it
grads_and_vars = list(grads_and_vars)
self._compute_var_opt_mapping(grads_and_vars)
# Split gradients and variables into a separate list for each optimizer
grad_var_lists = [[] for _ in range(len(self.pred_opt_pairs) + 1)]
for grad, var in grads_and_vars:
var_key = _var_key(var)
if var_key in self.var_opt_mapping:
grad_var_lists[self.var_opt_mapping[var_key]].append((grad, var))
with tf.init_scope():
_ = self.iterations
# This is only necessary in TF 2.0 and older, but doesn't hurt on newer versions
for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists):
optimizer._create_slots([v for (_, v) in opt_grads_and_vars])
return tf.distribute.get_replica_context().merge_call(
self._apply_gradients, args=(grad_var_lists, name), kwargs=kwargs
)
def _apply_gradients(self, distribution, grad_var_lists, name, **kwargs):
# Apply gradients to each optimizer
with tf.name_scope(self._name):
train_ops = [
distribution.extended.call_for_each_replica(
optimizer.apply_gradients, args=(opt_grads_and_vars,), kwargs=kwargs
)
for optimizer, opt_grads_and_vars in zip(
self.optimizers, grad_var_lists
)
]
return tf.group(*train_ops, name=name or "train_with_group")
def get_config(self):
optimizer_configs = [opt.get_config() for (_, opt) in self.pred_opt_pairs]
default_config = self.default.get_config()
config = {
"optimizer_configs": [
{"class_name": optimizer_config["name"], "config": optimizer_config}
for optimizer_config in optimizer_configs
],
"default_config": {
"class_name": default_config["name"],
"config": default_config,
},
"var_opt_mapping": self.var_opt_mapping, # serialized instead of `pred`s
}
return {**super().get_config(), **config}
@classmethod
def from_config(cls, original_config, custom_objects=None):
config = deepcopy(original_config)
case_optimizer = cls(
*[ # `(pred, opt)` tuples
(
lambda _: False, # placeholder callable (`pred` is not serialized)
tf.keras.optimizers.deserialize( # optimizer `opt`
opt_config, custom_objects=custom_objects
),
)
for opt_config in config["optimizer_configs"]
],
default_optimizer=tf.keras.optimizers.deserialize(
config["default_config"], custom_objects=custom_objects
),
)
# Since we no longer have the `pred`s, we set the mapping explicitly
case_optimizer.var_opt_mapping = config["var_opt_mapping"]
return case_optimizer
def _compute_var_opt_mapping(self, grads_and_vars):
"""Compute a unique mapping from variables to optimizer indices."""
self.var_opt_mapping = {}
for _, var in grads_and_vars:
num_optimizers = 0
var_key = _var_key(var)
# Find the optimizer(s) that want to claim this variable
for optimizer_index, (predicate, _) in enumerate(self.pred_opt_pairs):
if predicate(var):
self.var_opt_mapping[var_key] = optimizer_index
num_optimizers += 1
if num_optimizers > 1:
raise ValueError(f"Variable `{var}` claimed by multiple optimizers.")
if num_optimizers == 0:
if self.default is not None:
self.var_opt_mapping[var_key] = self.DEFAULT_OPT_INDEX
else:
warnings.warn(
f"No `default_optimizer` provided to train variable `{var}`."
)
# Make sure that each optimizer touches at least one variable
for optimizer_index, (_, optimizer) in enumerate(self.pred_opt_pairs):
if optimizer_index not in self.var_opt_mapping.values():
raise ValueError(
f"Optimizer `{optimizer}` did not claim any variables."
)
@utils.register_keras_custom_object
class Bop(Optimizer):
"""Binary optimizer (Bop).
Bop is a latent-free optimizer for Binarized Neural Networks (BNNs) and
Binary Weight Networks (BWN).
Bop maintains an exponential moving average of the gradients controlled by
`gamma`. If this average exceeds the `threshold`, a weight is flipped.
    The hyperparameter `gamma` is somewhat analogous to the learning rate in
    SGD methods: a high `gamma` results in rapid convergence but also makes
    training noisier.
Note that the default `threshold` is not optimal for all situations.
Setting the threshold too high results in little learning, while setting it
too low results in overly noisy behaviour.
!!! warning
The `is_binary_variable` check of this optimizer will only target variables that
have been explicitly marked as being binary using `NoOp(precision=1)`.
!!! example
```python
no_op_quantizer = lq.quantizers.NoOp(precision=1)
layer = lq.layers.QuantDense(16, kernel_quantizer=no_op_quantizer)
optimizer = lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, lq.optimizers.Bop()),
default_optimizer=tf.keras.optimizers.Adam(0.01), # for FP weights
)
```
# Arguments
threshold: magnitude of average gradient signal required to flip a weight.
gamma: the adaptivity rate.
name: name of the optimizer.
# References
- [Latent Weights Do Not Exist: Rethinking Binarized Neural Network Optimization](https://papers.nips.cc/paper/8971-latent-weights-do-not-exist-rethinking-binarized-neural-network-optimization)
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self, threshold: float = 1e-8, gamma: float = 1e-4, name: str = "Bop", **kwargs
):
super().__init__(name=name, **kwargs)
self._set_hyper("threshold", threshold)
self._set_hyper("gamma", gamma)
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, "m")
def _get_decayed_hyper(self, name: str, var_dtype):
hyper = self._get_hyper(name, var_dtype)
if isinstance(hyper, tf.keras.optimizers.schedules.LearningRateSchedule):
local_step = tf.cast(self.iterations, var_dtype)
hyper = tf.cast(hyper(local_step), var_dtype)
return hyper
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
gamma = self._get_decayed_hyper("gamma", var_dtype)
threshold = self._get_decayed_hyper("threshold", var_dtype)
m = self.get_slot(var, "m")
m_t = m.assign_add(gamma * (grad - m))
var_t = lq.math.sign(-tf.sign(var * m_t - threshold) * var)
return var.assign(var_t).op
def get_config(self):
config = {
"threshold": self._serialize_hyperparameter("threshold"),
"gamma": self._serialize_hyperparameter("gamma"),
}
return {**super().get_config(), **config}
@classmethod
def from_config(cls, config, custom_objects=None):
for hyper in ("gamma", "threshold"):
if hyper in config and isinstance(config[hyper], dict):
config[hyper] = tf.keras.optimizers.schedules.deserialize(
config[hyper], custom_objects=custom_objects
)
return cls(**config)
@staticmethod
def is_binary_variable(var: tf.Variable) -> bool:
"""Returns `True` for variables with `var.precision == 1`.
        This is an example of a predicate that can be used by the `CaseOptimizer`.
# Arguments
var: a `tf.Variable`.
"""
return getattr(var, "precision", 32) == 1
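# A plain-Python sketch of the flip rule in `_resource_apply_dense` above:
# a weight flips sign exactly when the moving average of the gradient
# agrees with the weight's sign strongly enough, i.e. when var * m exceeds
# `threshold`. The numbers are illustrative, not tuned values.
def _bop_flip_rule_example(var=1.0, m=2e-8, threshold=1e-8):
    return -var if var * m > threshold else var  # flips here, returning -1.0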
| 14,848 | 39.350543 | 201 | py |
larq | larq-main/larq/math_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("fn", [lq.math.sign])
def test_sign(fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [fn(x)])
binarized_values = np.random.choice([-1, 1], size=(2, 5)).astype(np.float32)
result = f(binarized_values)[0]
np.testing.assert_allclose(result, binarized_values)
real_values = generate_real_values_with_zeros()
result = f(real_values)[0]
assert not np.any(result == 0)
assert np.all(result[real_values < 0] == -1)
assert np.all(result[real_values >= 0] == 1)
zero_values = np.zeros((2, 5))
result = f(zero_values)[0]
assert np.all(result == 1)
@pytest.mark.parametrize("fn", [lq.math.heaviside])
def test_heaviside(fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [fn(x)])
binarized_values = np.random.choice([0, 1], size=(2, 5))
result = f([binarized_values])[0]
np.testing.assert_allclose(result, binarized_values)
real_values = generate_real_values_with_zeros()
result = f([real_values])[0]
assert np.all(result[real_values <= 0] == 0)
assert np.all(result[real_values > 0] == 1)
| 1,299 | 31.5 | 80 | py |
larq | larq-main/larq/layers_base.py | import logging
from typing import Optional
import tensorflow as tf
from larq import context, quantizers, utils
from larq.quantized_variable import QuantizedVariable
from larq.quantizers import NoOp, QuantizerType
log = logging.getLogger(__name__)
def _is_binary(quantizer):
return getattr(quantizer, "precision", None) == 1 and not isinstance(
quantizer, NoOp
)
def _compute_padded_size(stride, dilation_rate, input_size, filter_size):
if input_size is None:
return None
effective_filter_size = (filter_size - 1) * dilation_rate + 1
output_size = (input_size + stride - 1) // stride
padded_size = (output_size - 1) * stride + effective_filter_size
if tf.is_tensor(input_size):
return tf.math.maximum(padded_size, input_size)
return max(padded_size, input_size)
def _compute_padding(stride, dilation_rate, input_size, filter_size):
padded_size = _compute_padded_size(stride, dilation_rate, input_size, filter_size)
total_padding = padded_size - input_size
padding = total_padding // 2
return padding, padding + (total_padding % 2)
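# A worked sketch of the SAME-padding arithmetic above: stride 2 and a
# 3-tap filter on a size-4 axis need one extra pixel in total, placed on
# the trailing side per TensorFlow's SAME convention. Illustrative only.
def _compute_padding_example():
    return _compute_padding(2, 1, 4, 3)  # expected: (0, 1)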
class BaseLayer(tf.keras.layers.Layer):
"""Base class for defining quantized layers.
`input_quantizer` is the element-wise quantization functions to use.
If `input_quantizer=None` this layer is equivalent to `tf.keras.layers.Layer`.
"""
def __init__(self, *args, input_quantizer=None, **kwargs):
self.input_quantizer = quantizers.get(input_quantizer)
super().__init__(*args, **kwargs)
def call(self, inputs):
if self.input_quantizer:
inputs = self.input_quantizer(inputs)
with context.quantized_scope(True):
return super().call(inputs)
def get_config(self):
return {
**super().get_config(),
"input_quantizer": quantizers.serialize(self.input_quantizer),
}
def _get_quantizer(self, name) -> Optional[QuantizerType]:
"""Get quantizer for given kernel name"""
return None
def _add_variable_with_custom_getter(self, name: str, **kwargs):
quantizer = self._get_quantizer(name)
if quantizer is None:
return super()._add_variable_with_custom_getter(name, **kwargs)
old_getter = kwargs.pop("getter")
# Wrap `getter` with a version that returns a `QuantizedVariable`.
def getter(*args, **kwargs):
variable = old_getter(*args, **kwargs)
return QuantizedVariable.from_variable(variable, quantizer)
return super()._add_variable_with_custom_getter(name, getter=getter, **kwargs)
class QuantizerBase(BaseLayer):
"""Base class for defining quantized layers with a single kernel.
`kernel_quantizer` is the element-wise quantization functions to use.
If `kernel_quantizer=None` this layer is equivalent to `BaseLayer`.
"""
def __init__(self, *args, kernel_quantizer=None, **kwargs):
self.kernel_quantizer = quantizers.get_kernel_quantizer(kernel_quantizer)
super().__init__(*args, **kwargs)
if _is_binary(self.kernel_quantizer) and not self.kernel_constraint:
log.warning(
"Using a binary weight quantizer without setting `kernel_constraint` "
"may result in starved weights (where the gradient is always zero)."
)
def _get_quantizer(self, name: str) -> Optional[QuantizerType]:
return self.kernel_quantizer if name == "kernel" else None
def get_config(self):
return {
**super().get_config(),
"kernel_quantizer": quantizers.serialize(self.kernel_quantizer),
}
class QuantizerBaseConv(tf.keras.layers.Layer):
"""Base class for defining quantized conv layers"""
def __init__(self, *args, pad_values=0.0, **kwargs):
self.pad_values = pad_values
super().__init__(*args, **kwargs)
is_zero_padding = not tf.is_tensor(self.pad_values) and self.pad_values == 0.0
self._is_native_padding = self.padding != "same" or is_zero_padding
if self.padding == "causal" and not is_zero_padding:
raise ValueError("Causal padding with `pad_values != 0` is not supported.")
def _get_spatial_padding_same(self, shape):
return [
_compute_padding(stride, dilation_rate, shape[i], filter_size)
for i, (stride, dilation_rate, filter_size) in enumerate(
zip(self.strides, self.dilation_rate, self.kernel_size)
)
]
def _get_spatial_shape(self, input_shape):
return (
input_shape[1:-1]
if self.data_format == "channels_last"
else input_shape[2:]
)
def _get_padding_same(self, inputs):
input_shape = inputs.shape
if not input_shape[1:].is_fully_defined():
input_shape = tf.shape(inputs)
padding = self._get_spatial_padding_same(self._get_spatial_shape(input_shape))
return (
[[0, 0], *padding, [0, 0]]
if self.data_format == "channels_last"
else [[0, 0], [0, 0], *padding]
)
def _get_padding_same_shape(self, input_shape):
spatial_input_shape = self._get_spatial_shape(input_shape)
spatial_shape = [
_compute_padded_size(stride, dilation, size, filter_size)
for size, stride, dilation, filter_size in zip(
spatial_input_shape,
self.strides,
self.dilation_rate,
self.kernel_size,
)
]
if self.data_format == "channels_last":
return tf.TensorShape([input_shape[0], *spatial_shape, input_shape[-1]])
return tf.TensorShape([*input_shape[:2], *spatial_shape])
def build(self, input_shape):
if self._is_native_padding:
super().build(input_shape)
else:
with utils.patch_object(self, "padding", "valid"):
super().build(self._get_padding_same_shape(input_shape))
def call(self, inputs):
if self._is_native_padding:
return super().call(inputs)
inputs = tf.pad(
inputs, self._get_padding_same(inputs), constant_values=self.pad_values
)
with utils.patch_object(self, "padding", "valid"):
return super().call(inputs)
def get_config(self):
return {
**super().get_config(),
"pad_values": tf.keras.backend.get_value(self.pad_values),
}
class QuantizerDepthwiseBase(BaseLayer):
"""Base class for defining depthwise quantized layers
`depthwise_quantizer` is the element-wise quantization functions to use.
If `depthwise_quantizer=None` this layer is equivalent to `BaseLayer`.
"""
def __init__(
self,
*args,
depthwise_quantizer: Optional[QuantizerType] = None,
**kwargs,
):
self.depthwise_quantizer = quantizers.get_kernel_quantizer(depthwise_quantizer)
super().__init__(*args, **kwargs)
if _is_binary(self.depthwise_quantizer) and not self.depthwise_constraint:
log.warning(
"Using a binary weight quantizer without setting `depthwise_constraint` "
"may result in starved weights (where the gradient is always zero)."
)
def _get_quantizer(self, name: str) -> Optional[QuantizerType]:
return self.depthwise_quantizer if name == "depthwise_kernel" else None
def get_config(self):
return {
**super().get_config(),
"depthwise_quantizer": quantizers.serialize(self.depthwise_quantizer),
}
class QuantizerSeparableBase(BaseLayer):
"""Base class for defining separable quantized layers.
`depthwise_quantizer` and `pointwise_quantizer` are the element-wise quantization
functions to use. If all quantization functions are `None` this layer is equivalent
to `BaseLayer`.
"""
def __init__(
self,
*args,
depthwise_quantizer: Optional[QuantizerType] = None,
pointwise_quantizer: Optional[QuantizerType] = None,
**kwargs,
):
self.depthwise_quantizer = quantizers.get_kernel_quantizer(depthwise_quantizer)
self.pointwise_quantizer = quantizers.get_kernel_quantizer(pointwise_quantizer)
super().__init__(*args, **kwargs)
if _is_binary(self.depthwise_quantizer) and not self.depthwise_constraint:
log.warning(
"Using a binary `depthwise_quantizer` without setting `depthwise_constraint` "
"may result in starved weights (where the gradient is always zero)."
)
if _is_binary(self.pointwise_quantizer) and not self.pointwise_constraint:
log.warning(
"Using a binary `pointwise_quantizer` without setting `pointwise_constraint` "
"may result in starved weights (where the gradient is always zero)."
)
def _get_quantizer(self, name: str) -> Optional[QuantizerType]:
if name == "depthwise_kernel":
return self.depthwise_quantizer
if name == "pointwise_kernel":
return self.pointwise_quantizer
return None
def get_config(self):
return {
**super().get_config(),
"depthwise_quantizer": quantizers.serialize(self.depthwise_quantizer),
"pointwise_quantizer": quantizers.serialize(self.pointwise_quantizer),
}
| 9,491 | 35.933852 | 94 | py |
larq | larq-main/larq/utils.py | from contextlib import contextmanager
import tensorflow as tf
def memory_as_readable_str(num_bits: int) -> str:
"""Generate a human-readable string for the memory size.
1 KiB = 1024 B; we use the binary prefix (KiB) [1,2] instead of the decimal prefix
(KB) to avoid any confusion with multiplying by 1000 instead of 1024.
[1] https://en.wikipedia.org/wiki/Binary_prefix
[2] https://physics.nist.gov/cuu/Units/binary.html
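    !!! example
        A quick sanity check (the value is chosen purely for illustration):
        ```python
        memory_as_readable_str(8 * 1024)  # -> "1.00 KiB"
        ```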
"""
suffixes = ["B", "KiB", "MiB", "GiB"]
num_bytes = num_bits / 8
for i, suffix in enumerate(suffixes):
rounded = num_bytes / (1024**i)
if rounded < 1024:
break
return f"{rounded:,.2f} {suffix}"
def register_keras_custom_object(cls):
"""See https://github.com/tensorflow/addons/blob/master/tensorflow_addons/utils/keras_utils.py#L25"""
tf.keras.utils.get_custom_objects()[cls.__name__] = cls
return cls
def register_alias(name: str):
"""A decorator to register a custom keras object under a given alias.
!!! example
```python
@utils.register_alias("degeneration")
class Degeneration(tf.keras.metrics.Metric):
pass
```
"""
def register_func(cls):
tf.keras.utils.get_custom_objects()[name] = cls
return cls
return register_func
def set_precision(precision: int = 32):
"""A decorator to set the precision of a quantizer function
# Arguments
precision: An integer defining the precision of the output.
"""
def decorator(function):
setattr(function, "precision", precision)
return function
return decorator
@contextmanager
def patch_object(object, name, value):
"""Temporarily overwrite attribute on object"""
old_value = getattr(object, name)
setattr(object, name, value)
yield
setattr(object, name, old_value)
| 1,874 | 25.408451 | 105 | py |
larq | larq-main/larq/models_test.py | import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq.models import ModelProfile
class ToyModel(tf.keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv = lq.layers.QuantConv2D(
filters=32,
kernel_size=(3, 3),
kernel_quantizer="ste_sign",
input_shape=(64, 64, 1),
padding="same",
)
self.pool = tf.keras.layers.GlobalAvgPool2D()
self.dense = tf.keras.layers.Dense(10, activation="softmax")
def call(self, inputs):
return self.dense(self.pool(self.conv(inputs)))
def get_functional_model():
input = tf.keras.Input((32, 32, 3))
x = lq.layers.QuantConv2D(
filters=32,
kernel_size=(3, 3),
kernel_quantizer="ste_sign",
padding="same",
)(input)
y, z = tf.split(x, 2, axis=-1)
x = tf.concat([y, z], axis=-1)
return tf.keras.Model(input, x, name="toy_model")
def get_profile_model():
return tf.keras.models.Sequential(
[
lq.layers.QuantConv2D(
filters=32,
kernel_size=(3, 3),
kernel_quantizer="ste_sign",
input_shape=(64, 64, 1),
padding="same",
),
tf.keras.layers.MaxPooling2D((2, 2)),
lq.layers.QuantDepthwiseConv2D(
kernel_size=3,
strides=(3, 3),
input_quantizer=lq.quantizers.SteTern(),
depthwise_quantizer=lq.quantizers.SteTern(),
padding="same",
pad_values=1.0,
use_bias=False,
),
tf.keras.layers.BatchNormalization(scale=False),
lq.layers.QuantSeparableConv2D(
32,
(3, 3),
input_quantizer="ste_sign",
depthwise_quantizer="ste_sign",
pointwise_quantizer="ste_sign",
padding="same",
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, trainable=False),
],
)
def get_submodel_profile_model(start_index=2, end_index=5):
# Same as above, but with a submodel as a layer
model = get_profile_model()
# Create submodel from e.g. the middle three layers
submodel = tf.keras.models.Sequential(
[layer for layer in model.layers[start_index:end_index]],
)
return tf.keras.models.Sequential(
[*model.layers[:start_index], submodel, *model.layers[end_index:]]
)
def test_model_profile():
profile = ModelProfile(get_profile_model())
assert len(profile.layer_profiles) == 7
def test_layer_profile():
profile = ModelProfile(get_profile_model())
kernel_count = [
32 * 3 * 3 * 1,
0,
32 * 3 * 3,
0,
32 * 3 * 3 * 1 + 32 * 1 * 1 * 32,
0,
32 * 11 * 11 * 10,
]
bias_count = [32, 0, 0, 64, 32, 0, 10]
param_count = [k + b for k, b in zip(kernel_count, bias_count)]
memory = [ # bits * (c * w * h * b) + bits * bias
1 * (32 * 3 * 3 * 1) + 32 * 32,
0,
2 * (32 * 3 * 3),
32 * (2 * 32),
1 * (32 * 3 * 3 * 1 + 32 * 1 * 1 * 32) + 32 * 32,
0,
32 * (32 * 11 * 11 * 10 + 10),
]
int8_fp_weights_mem = [
1 * (32 * 3 * 3 * 1) + 8 * 32,
0,
2 * (32 * 3 * 3),
8 * (32 * 2),
1 * (32 * 3 * 3 * 1 + 32 * 1 * 1 * 32) + 8 * 32,
0,
8 * (32 * 11 * 11 * 10 + 10),
]
fp_equiv_mem = [32 * n for n in param_count]
input_precision = [None, None, 2, None, 1, None, None]
output_shape = [
(-1, 64, 64, 32),
(-1, 32, 32, 32),
(-1, 11, 11, 32),
(-1, 11, 11, 32),
(-1, 11, 11, 32),
(-1, 11 * 11 * 32),
(-1, 10),
]
output_pixels = [int(np.prod(os[1:-1])) for os in output_shape]
unique_param_bidtwidths = [[1, 32], [], [2], [32], [1, 32], [], [32]]
unique_op_precisions = [[32], [], [2], [], [1], [], [32]]
mac_count = [params * pixels for params, pixels in zip(kernel_count, output_pixels)]
bin_mac_count = [
mc if (1 in pb and ip == 1) else 0
for mc, pb, ip in zip(mac_count, unique_param_bidtwidths, input_precision)
]
profiles = profile.layer_profiles
for i in range(len(profiles)):
print(f"Testing layer {i}...")
assert profiles[i].input_precision == input_precision[i]
assert profiles[i].output_shape == output_shape[i]
assert profiles[i].output_pixels == output_pixels[i]
assert profiles[i].weight_count() == param_count[i]
assert profiles[i].unique_param_bidtwidths == unique_param_bidtwidths[i]
assert profiles[i].unique_op_precisions == unique_op_precisions[i]
assert profiles[i].memory == memory[i]
assert profiles[i].fp_equivalent_memory == fp_equiv_mem[i]
assert profiles[i].int8_fp_weights_memory == int8_fp_weights_mem[i]
assert profiles[i].op_count("mac") == mac_count[i]
assert profiles[i].op_count("mac", 1) == bin_mac_count[i]
def test_layer_profile_1d():
model = tf.keras.models.Sequential(
[
lq.layers.QuantConv1D(
filters=32,
kernel_size=3,
input_shape=(64, 6),
kernel_quantizer="ste_sign",
padding="same",
),
tf.keras.layers.MaxPooling1D(2),
lq.layers.QuantSeparableConv1D(
filters=16,
kernel_size=3,
input_quantizer="ste_sign",
depthwise_quantizer="ste_sign",
pointwise_quantizer="ste_sign",
padding="same",
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, trainable=False),
]
)
profile = ModelProfile(model)
kernel_count = [(32 * 3 * 6), 0, (32 * 3 + 16 * 32), 0, (16 * 32 * 10)]
bias_count = [32, 0, 16, 0, 10]
param_count = [k + b for k, b in zip(kernel_count, bias_count)]
memory = [ # bits * (c * w * d) + bits * bias
1 * (32 * 3 * 6) + 32 * 32,
0,
1 * (32 * 3 + 16 * 32) + 32 * 16,
0,
32 * (32 * 16 * 10 + 10),
]
int8_fp_weights_mem = [
1 * (32 * 3 * 6) + 8 * 32,
0,
1 * (32 * 3 + 16 * 32) + 8 * 16,
0,
8 * (32 * 16 * 10 + 10),
]
fp_equiv_mem = [32 * n for n in param_count]
input_precision = [None, None, 1, None, None]
output_shape = [
(-1, 64, 32),
(-1, 32, 32),
(-1, 32, 16),
(-1, 32 * 16),
(-1, 10),
]
output_pixels = [int(np.prod(os[1:-1])) for os in output_shape]
unique_param_bidtwidths = [[1, 32], [], [1, 32], [], [32]]
unique_op_precisions = [[32], [], [1], [], [32]]
mac_count = [params * pixels for params, pixels in zip(kernel_count, output_pixels)]
bin_mac_count = [
mc if (1 in pb and ip == 1) else 0
for mc, pb, ip in zip(mac_count, unique_param_bidtwidths, input_precision)
]
profiles = profile.layer_profiles
for i in range(len(profiles)):
print(f"Testing layer {i}...")
assert profiles[i].input_precision == input_precision[i]
assert profiles[i].output_shape == output_shape[i]
assert profiles[i].output_pixels == output_pixels[i]
assert profiles[i].weight_count() == param_count[i]
assert profiles[i].unique_param_bidtwidths == unique_param_bidtwidths[i]
assert profiles[i].unique_op_precisions == unique_op_precisions[i]
assert profiles[i].memory == memory[i]
assert profiles[i].fp_equivalent_memory == fp_equiv_mem[i]
assert profiles[i].int8_fp_weights_memory == int8_fp_weights_mem[i]
assert profiles[i].op_count("mac") == mac_count[i]
assert profiles[i].op_count("mac", 1) == bin_mac_count[i]
def test_summary(snapshot, capsys):
model = get_profile_model()
lq.models.summary(model)
captured = capsys.readouterr()
snapshot.assert_match(captured.out)
# A model with no weights
model = tf.keras.models.Sequential(
[tf.keras.layers.Lambda(lambda x: tf.zeros(2), input_shape=(32, 32))]
)
lq.models.summary(model)
captured = capsys.readouterr()
snapshot.assert_match(captured.out)
def test_submodel_summary(capsys, snapshot):
default_profile = ModelProfile(get_profile_model())
submodel = get_submodel_profile_model(start_index=2, end_index=5)
submodel_profile = ModelProfile(submodel)
submodel_layer_profile = submodel_profile.layer_profiles[2]
# Assert that layer profile of the submodel "layer" matches the original layers
profiles = default_profile.layer_profiles[2:5]
assert submodel_layer_profile.input_precision == profiles[0].input_precision
assert submodel_layer_profile.output_shape == profiles[-1].output_shape
assert submodel_layer_profile.output_pixels == profiles[-1].output_pixels
assert submodel_layer_profile.weight_count() == sum(
(p.weight_count() for p in profiles)
)
bitwidths = []
op_precisions = []
for p in profiles:
bitwidths.extend(p.unique_param_bidtwidths)
op_precisions.extend(p.unique_op_precisions)
assert set(submodel_layer_profile.unique_param_bidtwidths) == set(bitwidths)
assert set(submodel_layer_profile.unique_op_precisions) == set(op_precisions)
assert submodel_layer_profile.memory == sum((p.memory for p in profiles))
assert submodel_layer_profile.fp_equivalent_memory == sum(
(p.fp_equivalent_memory for p in profiles)
)
assert submodel_layer_profile.int8_fp_weights_memory == sum(
(p.int8_fp_weights_memory for p in profiles)
)
assert submodel_layer_profile.op_count("mac") == sum(
(p.op_count("mac") for p in profiles)
)
assert submodel_layer_profile.op_count("mac", 1) == sum(
(p.op_count("mac", 1) for p in profiles)
)
# Assert that the total profile summary matches
assert submodel_profile.generate_summary() == default_profile.generate_summary()
# Snapshot the submodel profile itself to make sure it remains correct
lq.models.summary(get_submodel_profile_model())
snapshot.assert_match(capsys.readouterr().out)
def test_subclass_model_summary(snapshot, capsys):
model = ToyModel()
model.build((None, 32, 32, 3))
lq.models.summary(model)
captured = capsys.readouterr()
snapshot.assert_match(captured.out)
def test_functional_model_summary(snapshot, capsys):
lq.models.summary(get_functional_model())
captured = capsys.readouterr()
key = "2.4+" if version.parse(tf.__version__) >= version.parse("2.3.9") else "<2.4"
snapshot.assert_match(captured.out.lower(), key)
def test_summary_invalid_model():
with pytest.raises(ValueError):
lq.models.summary(tf.keras.Model())
def test_bitsize_invalid_key():
with pytest.raises(NotImplementedError):
lq.models._bitsize_as_str(-1)
def test_number_as_readable_str_large():
assert lq.models._number_as_readable_str(1e16) == "1.00E+16"
@pytest.fixture(autouse=True)
def run_around_tests():
tf.keras.backend.clear_session()
yield
| 11,312 | 33.281818 | 88 | py |
larq | larq-main/larq/metrics_test.py | import numpy as np
import pytest
import tensorflow as tf
from larq import metrics
def test_config():
mcv = metrics.FlipRatio(values_dtype="int16", name="mcv", dtype=tf.float16)
assert mcv.name == "mcv"
assert mcv.stateful
assert mcv.dtype == tf.float16
assert mcv.values_dtype == tf.int16
mcv2 = metrics.FlipRatio.from_config(mcv.get_config())
assert mcv2.name == "mcv"
assert mcv2.stateful
assert mcv2.dtype == tf.float16
assert mcv2.values_dtype == tf.int16
@pytest.mark.usefixtures("eager_mode")
def test_metric():
mcv = metrics.FlipRatio()
mcv.build((2,))
assert 0 == mcv.result().numpy()
assert 0 == mcv.total.numpy()
assert 0 == mcv.count.numpy()
mcv.update_state(np.array([1, 1]))
assert all([1, 1] == mcv._previous_values.numpy())
assert 0 == mcv.total.numpy()
assert 1 == mcv.count.numpy()
assert 0 == mcv.result().numpy()
mcv.update_state(np.array([2, 2]))
assert all([2, 2] == mcv._previous_values.numpy())
assert 1 == mcv.total.numpy()
assert 2 == mcv.count.numpy()
assert 1 == mcv.result().numpy()
mcv.update_state(np.array([1, 2]))
assert all([1, 2] == mcv._previous_values.numpy())
assert 1.5 == mcv.total.numpy()
assert 3 == mcv.count.numpy()
assert 1.5 / 2 == mcv.result().numpy()
@pytest.mark.usefixtures("eager_mode")
def test_metric_implicit_build():
mcv = metrics.FlipRatio()
mcv.update_state(np.array([1, 1]))
assert all([1, 1] == mcv._previous_values.numpy())
assert 0 == mcv.total.numpy()
assert 1 == mcv.count.numpy()
assert 0 == mcv.result().numpy()
mcv.update_state(np.array([2, 2]))
assert all([2, 2] == mcv._previous_values.numpy())
assert 1 == mcv.total.numpy()
assert 2 == mcv.count.numpy()
assert 1 == mcv.result().numpy()
mcv.update_state(np.array([1, 2]))
assert all([1, 2] == mcv._previous_values.numpy())
assert 1.5 == mcv.total.numpy()
assert 3 == mcv.count.numpy()
assert 1.5 / 2 == mcv.result().numpy()
@pytest.mark.usefixtures("eager_mode")
def test_metric_wrong_shape():
mcv = metrics.FlipRatio()
mcv.build((3,))
with pytest.raises((ValueError, tf.errors.InvalidArgumentError)):
mcv.update_state(np.array([1, 1]))
@pytest.mark.usefixtures("graph_mode")
def test_metric_in_graph_mode():
mcv = metrics.FlipRatio()
mcv.build((2,))
new_state = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2])
update_state_op = mcv.update_state(new_state)
metric_value = mcv.result()
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.variables_initializer(mcv.variables))
sess.run(update_state_op, feed_dict={new_state: [1, 1]})
sess.run(update_state_op, feed_dict={new_state: [2, 2]})
sess.run(update_state_op, feed_dict={new_state: [1, 2]})
previous, total, count, result = sess.run(
[mcv._previous_values, mcv.total, mcv.count, metric_value]
)
assert all([1, 2] == previous)
assert 1.5 == total
assert 3 == count
assert 1.5 / 2 == result
| 3,102 | 28.836538 | 79 | py |
larq | larq-main/larq/layers.py | """Each quantized layer requires an `input_quantizer` and a `kernel_quantizer` that
describe the way of quantizing the activations of the previous layer and the weights,
respectively.
If both `input_quantizer` and `kernel_quantizer` are `None` the layer
is equivalent to a full precision layer.
"""
import tensorflow as tf
from packaging import version
from larq import utils
from larq.layers_base import (
QuantizerBase,
QuantizerBaseConv,
QuantizerDepthwiseBase,
QuantizerSeparableBase,
)
@utils.register_keras_custom_object
class QuantDense(QuantizerBase, tf.keras.layers.Dense):
"""Just your regular densely-connected quantized NN layer.
`QuantDense` implements the operation:
`output = activation(dot(input_quantizer(input), kernel_quantizer(kernel)) + bias)`,
where `activation` is the element-wise activation function passed as the
`activation` argument, `kernel` is a weights matrix created by the layer, and `bias`
is a bias vector created by the layer (only applicable if `use_bias` is `True`).
`input_quantizer` and `kernel_quantizer` are the element-wise quantization
functions to use. If both quantization functions are `None` this layer is
equivalent to `Dense`.
!!! note ""
If the input to the layer has a rank greater than 2, then it is flattened
prior to the initial dot product with `kernel`.
!!! example
```python
# as first layer in a sequential model:
model = Sequential()
model.add(
QuantDense(
32,
input_quantizer="ste_sign",
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
input_shape=(16,),
)
)
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(
QuantDense(
32,
input_quantizer="ste_sign",
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
)
)
```
# Arguments
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common situation
would be a 2D input with shape `(batch_size, input_dim)`.
# Output shape
N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input
with shape `(batch_size, input_dim)`, the output would have shape
`(batch_size, units)`.
"""
def __init__(
self,
units,
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
units,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantConv1D(QuantizerBase, QuantizerBaseConv, tf.keras.layers.Conv1D):
"""1D quantized convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved with the layer input
over a single spatial (or temporal) dimension to produce a tensor of outputs.
`input_quantizer` and `kernel_quantizer` are the element-wise quantization
functions to use. If both quantization functions are `None` this layer is
equivalent to `Conv1D`.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model, provide an `input_shape`
    argument (tuple of integers or `None`, e.g. `(10, 128)` for sequences of
    ten 128-dimensional vectors, or `(None, 128)` for variable-length sequences
    of 128-dimensional vectors).
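    !!! example
        A minimal usage sketch; the shapes and filter count are chosen purely
        for illustration:
        ```python
        model = Sequential()
        model.add(
            QuantConv1D(
                64,
                3,
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
                input_shape=(10, 128),
            )
        )
        # now model.output_shape == (None, 8, 64)
        ```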
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer, specifying the stride
length of the convolution. Specifying any stride value != 1 is incompatible
with specifying any `dilation_rate` value != 1.
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive). `"causal"`
results in causal (dilated) convolutions, e.g. output[t] does not depend on
input[t+1:]. Useful when modeling temporal data where the model should not
violate the temporal order. See [WaveNet: A Generative Model for Raw Audio,
section 2.1](https://arxiv.org/abs/1609.03499).
pad_values: The pad value to use when `padding="same"`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
dilation_rate: an integer or tuple/list of a single integer, specifying the
dilation rate to use for dilated convolution. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any `strides`
value != 1.
groups: A positive integer specifying the number of groups in which the input
is split along the channel axis. Each group is convolved separately with
`filters / groups` filters. The output is the concatenation of all the
`groups` results along the channel axis. Input channels and `filters`
must both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything, no
activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
3D tensor with shape: `(batch_size, steps, input_dim)`
# Output shape
3D tensor with shape: `(batch_size, new_steps, filters)`.
`steps` value might have changed due to padding or strides.
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
pad_values=0.0,
data_format="channels_last",
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
if groups != 1:
if version.parse(tf.__version__) >= version.parse("2.3"):
kwargs = {**kwargs, "groups": groups}
else:
raise ValueError(
"`groups` != 1 requires TensorFlow version 2.3 or newer."
)
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
pad_values=pad_values,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantConv2D(QuantizerBase, QuantizerBaseConv, tf.keras.layers.Conv2D):
"""2D quantized convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of outputs.
`input_quantizer` and `kernel_quantizer` are the element-wise quantization
functions to use. If both quantization functions are `None` this layer is
equivalent to `Conv2D`. If `use_bias` is True, a bias vector is created
and added to the outputs. Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model, provide the keyword argument
`input_shape` (tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
`data_format="channels_last"`.
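    !!! example
        A minimal usage sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantConv2D(
                32,
                (3, 3),
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
                input_shape=(64, 64, 1),
            )
        )
        # now model.output_shape == (None, 62, 62, 32)
        ```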
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
pad_values: The pad value to use when `padding="same"`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape `(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, height, width)`. It
defaults to the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify the
same value for all spatial dimensions. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any stride value
!= 1.
groups: A positive integer specifying the number of groups in which the input
is split along the channel axis. Each group is convolved separately with
`filters / groups` filters. The output is the concatenation of all the
`groups` results along the channel axis. Input channels and `filters` must
both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
pad_values=0.0,
data_format=None,
dilation_rate=(1, 1),
groups=1,
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
if groups != 1:
if version.parse(tf.__version__) >= version.parse("2.3"):
kwargs = {**kwargs, "groups": groups}
else:
raise ValueError(
"`groups` != 1 requires TensorFlow version 2.3 or newer."
)
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
pad_values=pad_values,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantConv3D(QuantizerBase, QuantizerBaseConv, tf.keras.layers.Conv3D):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. `input_quantizer` and `kernel_quantizer` are the element-wise quantization
functions to use. If both quantization functions are `None` this layer is
equivalent to `Conv3D`. If `use_bias` is True, a bias vector is created and
added to the outputs. Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model, provide the keyword argument
`input_shape` (tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel, in `data_format="channels_last"`.
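    !!! example
        A minimal usage sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantConv3D(
                8,
                (3, 3, 3),
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
                input_shape=(16, 16, 16, 1),
            )
        )
        # now model.output_shape == (None, 14, 14, 14, 8)
        ```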
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window. Can be a single
integer to specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of the
convolution along each spatial dimension. Can be a single integer to specify
the same value for all spatial dimensions. Specifying any stride value != 1
is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
pad_values: The pad value to use when `padding="same"`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults
to the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify the
same value for all spatial dimensions. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any stride value
!= 1.
groups: A positive integer specifying the number of groups in which the input
is split along the channel axis. Each group is convolved separately with
`filters / groups` filters. The output is the concatenation of all the
`groups` results along the channel axis. Input channels and `filters` must
both be divisible by `groups`.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
# Output shape
5D tensor with shape:
`(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
pad_values=0.0,
data_format=None,
dilation_rate=(1, 1, 1),
groups=1,
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
if groups != 1:
if version.parse(tf.__version__) >= version.parse("2.3"):
kwargs = {**kwargs, "groups": groups}
else:
raise ValueError(
"`groups` != 1 requires TensorFlow version 2.3 or newer."
)
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
pad_values=pad_values,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantDepthwiseConv2D(
QuantizerDepthwiseBase, QuantizerBaseConv, tf.keras.layers.DepthwiseConv2D
):
"""Quantized depthwise separable 2D convolution.
    Depthwise separable convolutions consist of performing just the first step of a
    depthwise spatial convolution (which acts on each input channel separately).
The `depth_multiplier` argument controls how many output channels are generated per
input channel in the depthwise step.
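    !!! example
        A minimal usage sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantDepthwiseConv2D(
                (3, 3),
                input_quantizer="ste_sign",
                depthwise_quantizer="ste_sign",
                depthwise_constraint="weight_clip",
                input_shape=(64, 64, 3),
            )
        )
        # now model.output_shape == (None, 62, 62, 3)
        ```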
# Arguments
kernel_size: An integer or tuple/list of 2 integers, specifying the height and
width of the 2D convolution window. Can be a single integer to specify the
same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of the
convolution along the height and width. Can be a single integer to specify
the same value for all spatial dimensions. Specifying any stride value != 1
is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
pad_values: The pad value to use when `padding="same"`.
depth_multiplier: The number of depthwise convolution output channels for each
input channel. The total number of depthwise convolution output channels
will be equal to `filters_in * depth_multiplier`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape `(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify the
same value for all spatial dimensions. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any stride value
!= 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (ie. `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
depthwise_quantizer: Quantization function applied to the `depthwise_kernel`
weights matrix.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to the depthwise kernel
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation').
depthwise_constraint: Constraint function applied to the depthwise kernel
matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
4D tensor with shape:
`[batch, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, rows, cols, channels]` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`[batch, filters, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(
self,
kernel_size,
strides=(1, 1),
padding="valid",
pad_values=0.0,
depth_multiplier=1,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
input_quantizer=None,
depthwise_quantizer=None,
depthwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
kernel_size=kernel_size,
strides=strides,
padding=padding,
pad_values=pad_values,
depth_multiplier=depth_multiplier,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
depthwise_quantizer=depthwise_quantizer,
depthwise_initializer=depthwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantSeparableConv1D(
QuantizerSeparableBase, QuantizerBaseConv, tf.keras.layers.SeparableConv1D
):
"""Depthwise separable 1D quantized convolution.
This layer performs a depthwise convolution that acts separately on channels,
followed by a pointwise convolution that mixes channels.
`input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
element-wise quantization functions to use. If all quantization functions are `None`
this layer is equivalent to `SeparableConv1D`. If `use_bias` is True and
a bias initializer is provided, it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
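    !!! example
        A minimal usage sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantSeparableConv1D(
                16,
                3,
                input_quantizer="ste_sign",
                depthwise_quantizer="ste_sign",
                pointwise_quantizer="ste_sign",
                depthwise_constraint="weight_clip",
                pointwise_constraint="weight_clip",
                input_shape=(10, 32),
            )
        )
        # now model.output_shape == (None, 8, 16)
        ```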
# Arguments
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial dimensions of the filters.
strides: A single integer specifying the strides of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
pad_values: The pad value to use when `padding="same"`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds
to inputs with shape `(batch, length, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying the dilation rate to use for dilated
convolution. Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a linear activation.
use_bias: Boolean, whether the layer uses a bias.
input_quantizer: Quantization function applied to the input of the layer.
depthwise_quantizer: Quantization function applied to the depthwise kernel.
pointwise_quantizer: Quantization function applied to the pointwise kernel.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise convolution
kernel.
pointwise_regularizer: Optional regularizer for the pointwise convolution
kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer`
(e.g. used for norm constraints or value constraints for layer weights).
The function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
pad_values=0.0,
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
input_quantizer=None,
depthwise_quantizer=None,
pointwise_quantizer=None,
depthwise_initializer="glorot_uniform",
pointwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
pad_values=pad_values,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
depthwise_quantizer=depthwise_quantizer,
pointwise_quantizer=pointwise_quantizer,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantSeparableConv2D(
QuantizerSeparableBase, QuantizerBaseConv, tf.keras.layers.SeparableConv2D
):
"""Depthwise separable 2D convolution.
    Separable convolutions consist of first performing a depthwise spatial convolution
(which acts on each input channel separately) followed by a pointwise convolution
which mixes together the resulting output channels. The `depth_multiplier` argument
controls how many output channels are generated per input channel
in the depthwise step.
`input_quantizer`, `depthwise_quantizer` and `pointwise_quantizer` are the
element-wise quantization functions to use. If all quantization functions are `None`
    this layer is equivalent to `SeparableConv2D`. If `use_bias` is True and
a bias initializer is provided, it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Intuitively, separable convolutions can be understood as a way to factorize a
convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
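    !!! example
        A minimal usage sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantSeparableConv2D(
                32,
                (3, 3),
                input_quantizer="ste_sign",
                depthwise_quantizer="ste_sign",
                pointwise_quantizer="ste_sign",
                depthwise_constraint="weight_clip",
                pointwise_constraint="weight_clip",
                input_shape=(64, 64, 3),
            )
        )
        # now model.output_shape == (None, 62, 62, 32)
        ```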
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the height and
width of the 2D convolution window. Can be a single integer to specify the
same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of the
convolution along the height and width. Can be a single integer to specify
the same value for all spatial dimensions. Specifying any stride value != 1
is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
pad_values: The pad value to use when `padding="same"`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape `(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, height, width)`. It
defaults to the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify the
same value for all spatial dimensions. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any stride value
!= 1.
depth_multiplier: The number of depthwise convolution output channels for each
input channel. The total number of depthwise convolution output channels
will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
depthwise_quantizer: Quantization function applied to the depthwise kernel
matrix.
pointwise_quantizer: Quantization function applied to the pointwise kernel
matrix.
depthwise_initializer: Initializer for the depthwise kernel matrix.
pointwise_initializer: Initializer for the pointwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to the depthwise kernel
matrix.
pointwise_regularizer: Regularizer function applied to the pointwise kernel
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
depthwise_constraint: Constraint function applied to the depthwise kernel
matrix.
pointwise_constraint: Constraint function applied to the pointwise kernel
matrix.
        bias_constraint: Constraint function applied to the bias vector.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
pad_values=0.0,
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
input_quantizer=None,
depthwise_quantizer=None,
pointwise_quantizer=None,
depthwise_initializer="glorot_uniform",
pointwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
pad_values=pad_values,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
depthwise_quantizer=depthwise_quantizer,
pointwise_quantizer=pointwise_quantizer,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantConv2DTranspose(QuantizerBase, tf.keras.layers.Conv2DTranspose):
"""Transposed quantized convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises from the desire to use a
transformation going in the opposite direction of a normal convolution, i.e.,
from something that has the shape of the output of some convolution to something
that has the shape of its input while maintaining a connectivity pattern
that is compatible with said convolution. `input_quantizer` and `kernel_quantizer`
are the element-wise quantization functions to use. If both quantization functions
are `None` this layer is equivalent to `Conv2DTranspose`.
When using this layer as the first layer in a model, provide the keyword argument
`input_shape` (tuple of integers, does not include the sample axis), e.g.
`input_shape=(128, 128, 3)` for 128x128 RGB pictures in
`data_format="channels_last"`.
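    !!! example
        A minimal upsampling sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantConv2DTranspose(
                16,
                (3, 3),
                strides=(2, 2),
                padding="same",
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
                input_shape=(32, 32, 3),
            )
        )
        # now model.output_shape == (None, 64, 64, 16)
        ```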
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window. Can be a single integer
to specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of
the convolution along the height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 2 integers, specifying the amount
of padding along the height and width of the output tensor. Can be a single
integer to specify the same value for all spatial dimensions. The amount of
output padding along a given dimension must be lower than the stride along
that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string, one of `channels_last` (default) or `channels_first`. The
ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape `(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, height, width)`. It
defaults to the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify the
same value for all spatial dimensions. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any stride value
!= 1.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
# References
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantConv3DTranspose(QuantizerBase, tf.keras.layers.Conv3DTranspose):
"""Transposed quantized convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution. `input_quantizer` and `kernel_quantizer`
are the element-wise quantization functions to use. If both quantization functions
are `None` this layer is equivalent to `Conv3DTranspose`.
When using this layer as the first layer in a model, provide the keyword argument
`input_shape` (tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
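    !!! example
        A minimal upsampling sketch with illustrative shapes:
        ```python
        model = Sequential()
        model.add(
            QuantConv3DTranspose(
                8,
                (3, 3, 3),
                strides=(2, 2, 2),
                padding="same",
                input_quantizer="ste_sign",
                kernel_quantizer="ste_sign",
                kernel_constraint="weight_clip",
                input_shape=(16, 16, 16, 1),
            )
        )
        # now model.output_shape == (None, 32, 32, 32, 8)
        ```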
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height
and width of the 3D convolution window. Can be a single integer to specify the
same value for all spatial dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of the
convolution along the depth, height and width. Can be a single integer to
specify the same value for all spatial dimensions. Specifying any stride
value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 3 integers, specifying the amount
of padding along the depth, height, and width. Can be a single integer to
specify the same value for all spatial dimensions. The amount of output
padding along a given dimension must be lower than the stride along that
same dimension. If set to `None` (default), the output shape is inferred.
data_format: A string, one of `channels_last` (default) or `channels_first`. The
ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape `(batch, depth, height, width, channels)` while
`channels_first` corresponds to inputs with shape
`(batch, channels, depth, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution. Can be a single integer to specify the
same value for all spatial dimensions. Currently, specifying any
`dilation_rate` value != 1 is incompatible with specifying any stride value
!= 1.
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
# Input shape
5D tensor with shape:
`(batch, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch, depth, rows, cols, channels)` if data_format='channels_last'.
# Output shape
5D tensor with shape:
`(batch, filters, new_depth, new_rows, new_cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch, new_depth, new_rows, new_cols, filters)` if data_format='channels_last'.
`depth` and `rows` and `cols` values might have changed due to padding.
# References
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1, 1),
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
output_padding=output_padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
@utils.register_keras_custom_object
class QuantLocallyConnected1D(QuantizerBase, tf.keras.layers.LocallyConnected1D):
"""Locally-connected quantized layer for 1D inputs.
The `QuantLocallyConnected1D` layer works similarly to the `QuantConv1D` layer,
except that weights are unshared, that is, a different set of filters is applied
at each different patch of the input. `input_quantizer` and `kernel_quantizer`
are the element-wise quantization functions to use. If both quantization functions
are `None` this layer is equivalent to `LocallyConnected1D`.
!!! example
```python
        # apply an unshared weight convolution 1d of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(QuantLocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(QuantLocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer, specifying the stride
length of the convolution. Specifying any stride value != 1 is incompatible
with specifying any `dilation_rate` value != 1.
padding: Currently only supports `"valid"` (case-insensitive).
`"same"` may be supported in the future.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds
to inputs with shape `(batch, length, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, length)`. It defaults
to the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1` or `2`.
`1` loops over input spatial locations to perform the forward pass.
It is memory-efficient but performs a lot of (small) ops.
`2` stores layer weights in a dense but sparsely-populated 2D matrix
and implements the forward pass as a single matrix-multiply. It uses
a lot of RAM but performs few (large) ops.
Depending on the inputs, layer parameters, hardware, and
`tf.executing_eagerly()` one implementation can be dramatically faster
(e.g. 50X) than another.
It is recommended to benchmark both in the setting of interest to pick
the most efficient one (in terms of speed and memory usage).
Following scenarios could benefit from setting `implementation=2`:
- eager execution;
- inference;
- running on CPU;
- large amount of RAM available;
- small models (few filters, small kernel);
- using `padding=same` (only possible with `implementation=2`).
# Input shape
3D tensor with shape: `(batch_size, steps, input_dim)`
# Output shape
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs,
):
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
implementation=implementation,
**kwargs,
)
@utils.register_keras_custom_object
class QuantLocallyConnected2D(QuantizerBase, tf.keras.layers.LocallyConnected2D):
"""Locally-connected quantized layer for 2D inputs.
The `QuantLocallyConnected2D` layer works similarly to the `QuantConv2D` layer,
except that weights are unshared, that is, a different set of filters is applied
at each different patch of the input. `input_quantizer` and `kernel_quantizer`
are the element-wise quantization functions to use. If both quantization functions
are `None` this layer is equivalent to `LocallyConnected2D`.
!!! example
```python
        # apply a 3x3 unshared weights convolution with 64 output filters on a
        # 32x32 image with `data_format="channels_last"`:
        model = Sequential()
        model.add(QuantLocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
        # now model.output_shape == (None, 30, 30, 64)
        # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
        # parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(QuantLocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of the
convolution along the width and height. Can be a single integer to specify
the same value for all spatial dimensions.
        padding: Currently only supports `"valid"` (case-insensitive).
            `"same"` may be supported in the future.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs. `channels_last` corresponds to
inputs with shape `(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape `(batch, channels, height, width)`. It
defaults to the `image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be "channels_last".
activation: Activation function to use. If you don't specify anything,
no activation is applied (`a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
input_quantizer: Quantization function applied to the input of the layer.
kernel_quantizer: Quantization function applied to the `kernel` weights matrix.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1` or `2`.
`1` loops over input spatial locations to perform the forward pass.
It is memory-efficient but performs a lot of (small) ops.
`2` stores layer weights in a dense but sparsely-populated 2D matrix
and implements the forward pass as a single matrix-multiply. It uses
a lot of RAM but performs few (large) ops.
Depending on the inputs, layer parameters, hardware, and
`tf.executing_eagerly()` one implementation can be dramatically faster
(e.g. 50X) than another.
It is recommended to benchmark both in the setting of interest to pick
the most efficient one (in terms of speed and memory usage).
Following scenarios could benefit from setting `implementation=2`:
- eager execution;
- inference;
- running on CPU;
- large amount of RAM available;
- small models (few filters, small kernel);
- using `padding=same` (only possible with `implementation=2`).
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
activation=None,
use_bias=True,
input_quantizer=None,
kernel_quantizer=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs,
):
super().__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
input_quantizer=input_quantizer,
kernel_quantizer=kernel_quantizer,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
implementation=implementation,
**kwargs,
)
| 65,598 | 46.535507 | 91 | py |
larq | larq-main/larq/callbacks_test.py | import math
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
from tensorflow.python.keras import testing_utils
import larq as lq
from larq import testing_utils as lq_testing_utils
from larq.callbacks import HyperparameterScheduler
if version.parse(tf.__version__) >= version.parse("2.11"):
from tensorflow.keras.optimizers import legacy as optimizers # type: ignore
else:
from tensorflow.keras import optimizers # type: ignore
class TestHyperparameterScheduler:
def _create_data_and_model(self, train_samples=1000):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=0,
input_shape=(10,),
num_classes=2,
)
y_train = tf.keras.utils.to_categorical(y_train)
model = lq_testing_utils.get_small_bnn_model(
x_train.shape[1], 20, y_train.shape[1]
)
return x_train, y_train, model
def test_normal_optimizer(self):
x_train, y_train, model = self._create_data_and_model()
model.compile(
loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(0.01),
metrics=["accuracy"],
)
def scheduler(x):
return 1.0 / (1.0 + x)
        # We shouldn't need to specify the optimizer
test_scheduler = HyperparameterScheduler(
schedule=scheduler,
hyperparameter="lr",
verbose=1,
)
num_epochs = 2
model.fit(
x_train,
y_train,
epochs=num_epochs,
batch_size=16,
callbacks=[test_scheduler],
verbose=0,
)
np.testing.assert_almost_equal(
tf.keras.backend.get_value(model.optimizer.lr),
scheduler(num_epochs - 1),
decimal=8,
)
def test_per_step(self):
train_samples = 20
x_train, y_train, model = self._create_data_and_model(train_samples)
model.compile(
loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(0.01),
metrics=["accuracy"],
)
def scheduler(x):
return 1.0 / (1.0 + x)
# Test that we don't accept incorrect `update_freq`
with pytest.raises(ValueError):
HyperparameterScheduler(
schedule=scheduler,
hyperparameter="lr",
update_freq="wrong",
)
# The actual scheduler we'll use
test_scheduler = HyperparameterScheduler(
schedule=scheduler,
hyperparameter="lr",
update_freq="step",
verbose=1,
)
num_epochs = 1
batch_size = 10
model.fit(
x_train,
y_train,
epochs=num_epochs,
            batch_size=batch_size,
callbacks=[test_scheduler],
verbose=0,
)
np.testing.assert_almost_equal(
tf.keras.backend.get_value(model.optimizer.lr),
scheduler(math.ceil(train_samples / batch_size) - 1),
decimal=8,
)
def test_case_optimizer(self):
x_train, y_train, model = self._create_data_and_model()
bop = lq.optimizers.Bop(threshold=1e-6, gamma=1e-3)
adam = optimizers.Adam(0.01)
case_optimizer = lq.optimizers.CaseOptimizer(
(lq.optimizers.Bop.is_binary_variable, bop),
default_optimizer=adam,
)
model.compile(
loss="categorical_crossentropy",
optimizer=case_optimizer,
metrics=["accuracy"],
)
def scheduler(x):
return 1.0 / (1.0 + x)
cbk_gamma_scheduler = HyperparameterScheduler(
schedule=scheduler,
optimizer=model.optimizer.optimizers[0],
hyperparameter="gamma",
verbose=1,
)
cbk_threshold_scheduler = HyperparameterScheduler(
schedule=scheduler,
optimizer=model.optimizer.optimizers[0],
hyperparameter="threshold",
verbose=1,
)
cbk_lr_scheduler = HyperparameterScheduler(
schedule=scheduler,
optimizer=model.optimizer.optimizers[1],
hyperparameter="lr",
verbose=1,
)
num_epochs = 3
model.fit(
x_train,
y_train,
epochs=num_epochs,
batch_size=16,
callbacks=[cbk_gamma_scheduler, cbk_lr_scheduler, cbk_threshold_scheduler],
verbose=0,
)
np.testing.assert_almost_equal(
tf.keras.backend.get_value(model.optimizer.optimizers[0].gamma),
scheduler(num_epochs - 1),
decimal=8,
)
np.testing.assert_almost_equal(
tf.keras.backend.get_value(model.optimizer.optimizers[0].threshold),
scheduler(num_epochs - 1),
decimal=8,
)
np.testing.assert_almost_equal(
tf.keras.backend.get_value(model.optimizer.optimizers[1].lr),
scheduler(num_epochs - 1),
decimal=8,
)
def test_wrong_param(self):
x_train, y_train, model = self._create_data_and_model()
model.compile(
loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(0.01),
metrics=["accuracy"],
)
def scheduler(x):
return 1.0 / (1.0 + x)
wrong_scheduler = HyperparameterScheduler(
schedule=scheduler,
hyperparameter="invalid_param",
verbose=1,
)
with pytest.raises(ValueError):
model.fit(
x_train,
y_train,
epochs=1,
batch_size=16,
callbacks=[wrong_scheduler],
verbose=0,
)
| 6,015 | 27.647619 | 87 | py |
larq | larq-main/larq/constraints_test.py | import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("name", ["weight_clip"])
def test_serialization(name):
fn = tf.keras.constraints.get(name)
ref_fn = getattr(lq.constraints, name)()
assert fn.__class__ == ref_fn.__class__
config = tf.keras.constraints.serialize(fn)
fn = tf.keras.constraints.deserialize(config)
assert fn.__class__ == ref_fn.__class__
def test_clip():
real_values = generate_real_values_with_zeros()
clip_instance = lq.constraints.weight_clip(clip_value=0.75)
result = clip_instance(tf.keras.backend.variable(real_values))
result = tf.keras.backend.eval(result)
np.testing.assert_allclose(result, np.clip(real_values, -0.75, 0.75))
| 811 | 31.48 | 73 | py |
larq | larq-main/larq/layers_test.py | import inspect
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq import testing_utils
PARAMS_ALL_LAYERS = [
(lq.layers.QuantDense, tf.keras.layers.Dense, (3, 2), dict(units=3)),
(
lq.layers.QuantConv1D,
tf.keras.layers.Conv1D,
(2, 3, 7),
dict(filters=2, kernel_size=3),
),
(
lq.layers.QuantConv2D,
tf.keras.layers.Conv2D,
(2, 3, 7, 6),
dict(filters=2, kernel_size=3),
),
(
lq.layers.QuantConv3D,
tf.keras.layers.Conv3D,
(2, 3, 7, 6, 5),
dict(filters=2, kernel_size=3),
),
(
lq.layers.QuantConv2DTranspose,
tf.keras.layers.Conv2DTranspose,
(2, 3, 7, 6),
dict(filters=2, kernel_size=3),
),
(
lq.layers.QuantConv3DTranspose,
tf.keras.layers.Conv3DTranspose,
(2, 3, 7, 6, 5),
dict(filters=2, kernel_size=3),
),
(
lq.layers.QuantLocallyConnected1D,
tf.keras.layers.LocallyConnected1D,
(2, 8, 5),
dict(filters=4, kernel_size=3),
),
(
lq.layers.QuantLocallyConnected2D,
tf.keras.layers.LocallyConnected2D,
(8, 6, 10, 4),
dict(filters=3, kernel_size=3),
),
]
PARAMS_SEP_LAYERS = [
(lq.layers.QuantSeparableConv1D, tf.keras.layers.SeparableConv1D, (2, 3, 7)),
(lq.layers.QuantSeparableConv2D, tf.keras.layers.SeparableConv2D, (2, 3, 7, 6)),
]
class TestLayers:
@pytest.mark.parametrize(
"quantized_layer, layer, input_shape, kwargs", PARAMS_ALL_LAYERS
)
def test_binarization(
self, quantized_layer, layer, input_shape, kwargs, keras_should_run_eagerly
):
input_data = testing_utils.random_input(input_shape)
random_weight = np.random.random() - 0.5
with lq.context.metrics_scope(["flip_ratio"]):
quant_output = testing_utils.layer_test(
quantized_layer,
kwargs=dict(
**kwargs,
kernel_quantizer="ste_sign",
input_quantizer="ste_sign",
kernel_initializer=tf.keras.initializers.constant(random_weight),
),
input_data=input_data,
should_run_eagerly=keras_should_run_eagerly,
)
fp_model = tf.keras.models.Sequential(
[
layer(
**kwargs,
kernel_initializer=tf.keras.initializers.constant(
np.sign(random_weight)
),
input_shape=input_shape[1:],
)
]
)
np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))
@pytest.mark.parametrize("quantized_layer, layer, input_shape", PARAMS_SEP_LAYERS)
def test_separable_layers(
self, quantized_layer, layer, input_shape, keras_should_run_eagerly
):
input_data = testing_utils.random_input(input_shape)
random_d_kernel = np.random.random() - 0.5
random_p_kernel = np.random.random() - 0.5
with lq.context.metrics_scope(["flip_ratio"]):
quant_output = testing_utils.layer_test(
quantized_layer,
kwargs=dict(
filters=3,
kernel_size=3,
depthwise_quantizer="ste_sign",
pointwise_quantizer="ste_sign",
input_quantizer="ste_sign",
depthwise_initializer=tf.keras.initializers.constant(
random_d_kernel
),
pointwise_initializer=tf.keras.initializers.constant(
random_p_kernel
),
),
input_data=input_data,
should_run_eagerly=keras_should_run_eagerly,
)
fp_model = tf.keras.models.Sequential(
[
layer(
filters=3,
kernel_size=3,
depthwise_initializer=tf.keras.initializers.constant(
np.sign(random_d_kernel)
),
pointwise_initializer=tf.keras.initializers.constant(
np.sign(random_p_kernel)
),
input_shape=input_shape[1:],
)
]
)
np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))
def test_depthwise_layers(self, keras_should_run_eagerly):
input_data = testing_utils.random_input((2, 3, 7, 6))
random_weight = np.random.random() - 0.5
with lq.context.metrics_scope(["flip_ratio"]):
quant_output = testing_utils.layer_test(
lq.layers.QuantDepthwiseConv2D,
kwargs=dict(
kernel_size=3,
depthwise_quantizer="ste_sign",
input_quantizer="ste_sign",
depthwise_initializer=tf.keras.initializers.constant(random_weight),
),
input_data=input_data,
should_run_eagerly=keras_should_run_eagerly,
)
fp_model = tf.keras.models.Sequential(
[
tf.keras.layers.DepthwiseConv2D(
kernel_size=3,
depthwise_initializer=tf.keras.initializers.constant(
np.sign(random_weight)
),
input_shape=input_data.shape[1:],
)
]
)
np.testing.assert_allclose(quant_output, fp_model.predict(np.sign(input_data)))
@pytest.mark.parametrize(
"layer_cls, input_dim",
[
(lq.layers.QuantConv1D, 3),
(lq.layers.QuantConv2D, 4),
(lq.layers.QuantConv3D, 5),
(lq.layers.QuantSeparableConv1D, 3),
(lq.layers.QuantSeparableConv2D, 4),
(lq.layers.QuantDepthwiseConv2D, 4),
],
)
@pytest.mark.parametrize("dilation", [True, False])
def test_non_zero_padding_layers(
self, mocker, layer_cls, input_dim, data_format, dilation
):
inputs = np.zeros(np.random.randint(5, 20, size=input_dim), np.float32)
kernel = tuple(np.random.randint(3, 7, size=input_dim - 2))
rand_tuple = tuple(np.random.randint(1, 4, size=input_dim - 2))
if not dilation and layer_cls in (
lq.layers.QuantSeparableConv2D,
lq.layers.QuantDepthwiseConv2D,
):
rand_tuple = int(rand_tuple[0])
kwargs = {"dilation_rate": rand_tuple} if dilation else {"strides": rand_tuple}
args = (kernel,) if layer_cls == lq.layers.QuantDepthwiseConv2D else (2, kernel)
ref_layer = layer_cls(*args, padding="same", **kwargs)
spy = mocker.spy(tf, "pad")
layer = layer_cls(*args, padding="same", pad_values=1.0, **kwargs)
layer.build(inputs.shape)
conv_op = getattr(layer, "_convolution_op", None)
assert layer(inputs).shape == ref_layer(inputs).shape
spy.assert_called_once_with(mocker.ANY, mocker.ANY, constant_values=1.0)
assert conv_op == getattr(layer, "_convolution_op", None)
@pytest.mark.parametrize(
"layer_cls",
[
lq.layers.QuantConv1D,
lq.layers.QuantConv2D,
lq.layers.QuantConv3D,
lq.layers.QuantSeparableConv1D,
lq.layers.QuantSeparableConv2D,
lq.layers.QuantDepthwiseConv2D,
],
)
@pytest.mark.parametrize("static", [True, False])
def test_non_zero_padding_shapes(self, layer_cls, data_format, static):
layer = layer_cls(
16, 3, padding="same", pad_values=1.0, data_format=data_format
)
input_shape = [32 if static else None] * layer.rank + [3]
if data_format == "channels_first":
input_shape = reversed(input_shape)
input = tf.keras.layers.Input(shape=input_shape)
layer(input)
if static:
for dim in layer.output_shape[1:]:
assert dim is not None
class TestLayerWarns:
def test_layer_warns(self, caplog):
lq.layers.QuantDense(5, kernel_quantizer="ste_sign")
assert len(caplog.records) >= 1
assert "kernel_constraint" in caplog.text
def test_layer_does_not_warn(self, caplog):
lq.layers.QuantDense(
5, kernel_quantizer="ste_sign", kernel_constraint="weight_clip"
)
assert "kernel_constraint" not in caplog.text
def test_depthwise_layer_warns(self, caplog):
lq.layers.QuantDepthwiseConv2D(5, depthwise_quantizer="ste_sign")
assert len(caplog.records) >= 1
assert "depthwise_constraint" in caplog.text
def test_depthwise_layer_does_not_warn(self, caplog):
lq.layers.QuantDepthwiseConv2D(
5, depthwise_quantizer="ste_sign", depthwise_constraint="weight_clip"
)
assert "depthwise_constraint" not in caplog.text
def test_separable_layer_warns(self, caplog):
lq.layers.QuantSeparableConv2D(
3, 3, depthwise_quantizer="ste_sign", pointwise_quantizer="ste_sign"
)
assert "depthwise_constraint" in caplog.text
assert "pointwise_constraint" in caplog.text
def test_separable_layer_does_not_warn(self, caplog):
lq.layers.QuantSeparableConv2D(
3,
3,
depthwise_quantizer="ste_sign",
pointwise_quantizer="ste_sign",
depthwise_constraint="weight_clip",
pointwise_constraint="weight_clip",
)
assert caplog.records == []
def test_conv1d_non_zero_padding_raises(self):
with pytest.raises(ValueError, match=r".*pad_values.*"):
lq.layers.QuantConv1D(24, 3, padding="causal", pad_values=1.0)
@pytest.mark.parametrize(
"layer", [lq.layers.QuantConv1D, lq.layers.QuantConv2D, lq.layers.QuantConv3D]
)
def test_groups(self, layer):
if version.parse(tf.__version__) < version.parse("2.3"):
with pytest.raises(ValueError, match=r".*groups.*"):
layer(24, 3, groups=2)
else:
assert layer(24, 3, groups=2).groups == 2
@pytest.mark.parametrize(
"quant_layer,layer",
[
(lq.layers.QuantDense, tf.keras.layers.Dense),
(lq.layers.QuantConv1D, tf.keras.layers.Conv1D),
(lq.layers.QuantConv2D, tf.keras.layers.Conv2D),
(lq.layers.QuantConv3D, tf.keras.layers.Conv3D),
(lq.layers.QuantConv2DTranspose, tf.keras.layers.Conv2DTranspose),
(lq.layers.QuantConv3DTranspose, tf.keras.layers.Conv3DTranspose),
(lq.layers.QuantLocallyConnected1D, tf.keras.layers.LocallyConnected1D),
(lq.layers.QuantLocallyConnected2D, tf.keras.layers.LocallyConnected2D),
(lq.layers.QuantDepthwiseConv2D, tf.keras.layers.DepthwiseConv2D),
],
)
def test_layer_kwargs(quant_layer, layer):
quant_params = inspect.signature(quant_layer).parameters
params = inspect.signature(layer).parameters
quant_params_list = list(quant_params.keys())
params_list = list(params.keys())
ignored_params = [
"input_quantizer",
"kernel_quantizer",
"depthwise_quantizer",
"pointwise_quantizer",
"pad_values",
]
if version.parse(tf.__version__) < version.parse("2.3"):
ignored_params.append("groups")
if layer in (tf.keras.layers.DepthwiseConv2D, tf.keras.layers.Conv3DTranspose):
ignored_params.append("dilation_rate")
for p in ignored_params:
try:
quant_params_list.remove(p)
except ValueError:
pass
assert quant_params_list == params_list
for param in params_list:
assert quant_params.get(param).default == params.get(param).default # type: ignore
| 12,025 | 34.68546 | 91 | py |
larq | larq-main/larq/constraints.py | """Functions from the `constraints` module allow setting constraints
(e.g. weight clipping) on network parameters during optimization.
These constraints are applied on a per-layer basis. The exact API will depend on the layer,
but the layers `QuantDense`, `QuantConv1D`, `QuantConv2D` and `QuantConv3D` have a
unified API.
These layers expose 2 keyword arguments:
- `kernel_constraint` for the main weights matrix
- `bias_constraint` for the bias.
```python
import larq as lq
lq.layers.QuantDense(64, kernel_constraint="weight_clip")
lq.layers.QuantDense(64, kernel_constraint=lq.constraints.WeightClip(2.))
```
"""
from typing import Any, Mapping
import tensorflow as tf
from larq import utils
@utils.register_keras_custom_object
class WeightClip(tf.keras.constraints.Constraint):
"""Weight Clip constraint
Constrains the weights incident to each hidden unit
to be between `[-clip_value, clip_value]`.
# Arguments
clip_value: The value to clip incoming weights.
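    !!! example
        A usage sketch; the layer size and quantizer choice are illustrative:
        ```python
        import larq as lq
        layer = lq.layers.QuantDense(
            64,
            kernel_quantizer="ste_sign",
            kernel_constraint=lq.constraints.WeightClip(clip_value=1.0),
        )
        ```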
"""
def __init__(self, clip_value: float = 1):
self.clip_value = clip_value
def __call__(self, x: tf.Tensor) -> tf.Tensor:
return tf.clip_by_value(x, -self.clip_value, self.clip_value)
def get_config(self) -> Mapping[str, Any]:
return {"clip_value": self.clip_value}
# Aliases
@utils.register_keras_custom_object
class weight_clip(WeightClip):
pass
| 1,392 | 25.283019 | 87 | py |
larq | larq-main/larq/models.py | import itertools
from dataclasses import dataclass
from typing import Any, Callable, Iterator, Mapping, Optional, Sequence, TypeVar, Union
import numpy as np
import tensorflow as tf
from terminaltables import AsciiTable
from larq import layers as lq_layers
from larq.utils import memory_as_readable_str
__all__ = ["summary"]
mac_containing_layers = (
lq_layers.QuantConv2D,
lq_layers.QuantSeparableConv2D,
lq_layers.QuantDepthwiseConv2D,
lq_layers.QuantDense,
tf.keras.layers.Conv2D,
tf.keras.layers.SeparableConv2D,
tf.keras.layers.DepthwiseConv2D,
tf.keras.layers.Dense,
lq_layers.QuantConv1D,
lq_layers.QuantSeparableConv1D,
tf.keras.layers.Conv1D,
tf.keras.layers.SeparableConv1D,
)
op_count_supported_layer_types = (
tf.keras.layers.Flatten,
tf.keras.layers.BatchNormalization,
tf.keras.layers.MaxPool2D,
tf.keras.layers.AveragePooling2D,
tf.keras.layers.MaxPool1D,
tf.keras.layers.AveragePooling1D,
*mac_containing_layers,
)
T = TypeVar("T")
def _flatten(lst: Iterator[Iterator[T]]) -> Sequence[T]:
return list(itertools.chain.from_iterable(lst))
def _bitsize_as_str(bitsize: int) -> str:
bitsize_names = {8: "byte", 8 * 1024: "kB"}
try:
return bitsize_names[bitsize]
except KeyError:
raise NotImplementedError()
def _number_as_readable_str(num: float) -> str:
# The initial rounding here is necessary so that e.g. `999000` gets
# formatted as `1.000 M` rather than `1000 k`
num = float(f"{num:.3g}")
# For numbers less than 1000, output them directly, stripping any trailing
# zeros and decimal places.
if num < 1000:
return str(num).rstrip("0").rstrip(".")
# For numbers that are at least 1000 trillion (1 quadrillion) format with
# scientific notation (3 s.f. = 2 d.p. in scientific notation).
if num >= 1e15:
return f"{num:.2E}"
# Count the magnitude.
magnitude = 0
while abs(num) >= 1000 and magnitude < 4:
magnitude += 1
num /= 1000.0
# ':.3g' formats the number with 3 significant figures, without stripping trailing
# zeros.
num = f"{num:.3g}".rstrip(".")
unit = ["", " k", " M", " B", " T"][magnitude]
return num + unit
def _format_table_entry(x: float, units: int = 1) -> Union[float, str]:
try:
assert not np.isnan(x)
if type(x) == str or x == 0 or units == 1:
return x
return x / units
except Exception:
return "?"
def _normalize_shape(shape):
return tuple(dim if dim else -1 for dim in shape)
class WeightProfile:
def __init__(self, weight, trainable: bool = True):
self._weight = weight
self.bitwidth = getattr(weight, "precision", 32)
self.trainable = trainable
@property
def count(self) -> int:
return int(np.prod(self._weight.shape.as_list()))
@property
def memory(self) -> int:
return self.bitwidth * self.count
@property
def fp_equivalent_memory(self) -> int:
return 32 * self.count
@property
def int8_fp_weights_memory(self) -> int:
"""Count any 32- or 16-bit weights as 8 bits instead."""
if self.bitwidth > 8:
return self.count * 8
return self.bitwidth * self.count
def is_bias(self) -> bool:
return "bias" in self._weight.name
@dataclass
class OperationProfile:
n: int
precision: int
op_type: str
class LayerProfile:
def __init__(self, layer: tf.keras.layers.Layer):
self._layer = layer
self.name = layer.name
weights = layer.weights
if isinstance(layer, tf.keras.layers.BatchNormalization):
fused_pairs = [("beta", "moving_mean"), ("gamma", "moving_variance")]
for pair in fused_pairs:
names = [w.name.split("/")[-1].replace(":0", "") for w in weights]
if pair[0] in names and pair[1] in names:
weights.pop(names.index(pair[0]))
self.weight_profiles = [
WeightProfile(
weight,
trainable=any(weight is w for w in layer.trainable_weights),
)
for weight in weights
]
self.op_profiles = []
if isinstance(layer, mac_containing_layers) and self.output_pixels:
for p in self.weight_profiles:
if not p.is_bias():
self.op_profiles.append(
OperationProfile(
n=p.count * self.output_pixels,
precision=max(self.input_precision or 32, p.bitwidth),
op_type="mac",
)
)
@property
def memory(self) -> int:
return sum(p.memory for p in self.weight_profiles)
@property
def int8_fp_weights_memory(self) -> int:
return sum(p.int8_fp_weights_memory for p in self.weight_profiles)
@property
def fp_equivalent_memory(self) -> int:
return sum(p.fp_equivalent_memory for p in self.weight_profiles)
def weight_count(
self, bitwidth: Optional[int] = None, trainable: Optional[bool] = None
) -> int:
count = 0
for p in self.weight_profiles:
if (bitwidth is None or p.bitwidth == bitwidth) and (
trainable is None or p.trainable == trainable
):
count += p.count
return count
def op_count(
self, op_type: Optional[str] = None, precision: Optional[int] = None
) -> Optional[int]:
if op_type != "mac":
raise ValueError("Currently only counting of MAC-operations is supported.")
if (
isinstance(self._layer, op_count_supported_layer_types)
and self.output_pixels
):
count = 0
for op in self.op_profiles:
if (precision is None or op.precision == precision) and (
op_type is None or op.op_type == op_type
):
count += op.n
return count
return None
@property
def input_precision(self) -> Optional[int]:
try:
return self._layer.input_quantizer.precision
except AttributeError:
return None
@property
def output_shape(self) -> Optional[Sequence[int]]:
try:
output_shape = self._layer.output_shape
if isinstance(output_shape, list):
if len(output_shape) == 1:
return _normalize_shape(output_shape[0])
return [_normalize_shape(shape) for shape in output_shape]
return _normalize_shape(output_shape)
except AttributeError:
return None
@property
def output_shape_str(self) -> str:
try:
return str(self.output_shape or "multiple")
except RuntimeError:
return "?"
@property
def output_pixels(self) -> Optional[int]:
"""Number of pixels for a single feature map (1 for fully connected layers)."""
if not self.output_shape:
return None
if len(self.output_shape) == 4:
return int(np.prod(self.output_shape[1:3]))
if len(self.output_shape) == 3:
return self.output_shape[1]
if len(self.output_shape) == 2:
return 1
raise NotImplementedError()
@property
    def unique_param_bitwidths(self) -> Sequence[int]:
return sorted(set([p.bitwidth for p in self.weight_profiles]))
@property
def unique_op_precisions(self) -> Sequence[int]:
return sorted(set([op.precision for op in self.op_profiles]))
def generate_table_row(
self, table_config: Mapping[str, Any]
) -> Sequence[Union[str, float]]:
row = [self.name, self.input_precision or "-", self.output_shape_str]
        for i in table_config["param_bitwidths"]:
n = self.weight_count(i)
n = _format_table_entry(n, table_config["param_units"])
row.append(n)
row.append(_format_table_entry(self.memory, table_config["memory_units"]))
for i in table_config["mac_precisions"]:
n = self.op_count("mac", i)
n = _format_table_entry(n, table_config["mac_units"])
row.append(n)
return row
class ModelProfile(LayerProfile):
def __init__(self, model: tf.keras.models.Model):
self.name = model.name
def get_profile(layer):
return (
LayerProfile(layer)
if not isinstance(layer, tf.keras.models.Model)
else ModelProfile(layer)
)
self.layer_profiles = [get_profile(layer) for layer in model.layers]
@property
def memory(self) -> int:
return sum(lp.memory for lp in self.layer_profiles)
@property
def int8_fp_weights_memory(self) -> int:
return sum(lp.int8_fp_weights_memory for lp in self.layer_profiles)
@property
def fp_equivalent_memory(self) -> int:
return sum(lp.fp_equivalent_memory for lp in self.layer_profiles)
def weight_count(
self, bitwidth: Optional[int] = None, trainable: Optional[bool] = None
) -> int:
return sum(lp.weight_count(bitwidth, trainable) for lp in self.layer_profiles)
    def op_count(
        self, op_type: Optional[str] = None, precision: Optional[int] = None
    ) -> int:
        return sum(lp.op_count(op_type, precision) or 0 for lp in self.layer_profiles)
@property
    def unique_param_bitwidths(self) -> Sequence[int]:
        return sorted(
            set(_flatten(lp.unique_param_bitwidths for lp in self.layer_profiles))
)
@property
def unique_op_precisions(self) -> Sequence[int]:
return sorted(
set(_flatten(lp.unique_op_precisions for lp in self.layer_profiles))
)
@property
def input_precision(self) -> Optional[int]:
return self.layer_profiles[0].input_precision
@property
def output_shape(self) -> Optional[Sequence[int]]:
return self.layer_profiles[-1].output_shape
def _generate_table_header(self, table_config: Mapping[str, Any]) -> Sequence[str]:
return [
"Layer",
"Input prec.\n(bit)",
"Outputs",
*(
f"# {i}-bit\nx {table_config['param_units']}"
                for i in table_config["param_bitwidths"]
),
f"Memory\n({_bitsize_as_str(table_config['memory_units'])})",
*(f"{i}-bit MACs" for i in table_config["mac_precisions"]),
]
def _generate_table_total(
self, table_config: Mapping[str, Any]
) -> Sequence[Union[float, str]]:
row = ["Total", "", ""]
        for i in table_config["param_bitwidths"]:
row.append(
_format_table_entry(self.weight_count(i), table_config["param_units"])
)
row.append(_format_table_entry(self.memory, table_config["memory_units"]))
for i in table_config["mac_precisions"]:
row.append(
_format_table_entry(self.op_count("mac", i), table_config["mac_units"])
)
return row
def generate_table(
self, include_macs: bool = True
) -> Sequence[Sequence[Union[float, str]]]:
table_config = {
"param_bidtwidths": self.unique_param_bidtwidths,
"mac_precisions": self.unique_op_precisions if include_macs else [],
"param_units": 1,
"memory_units": 8 * 1024,
"mac_units": 1,
}
table = []
table.append(self._generate_table_header(table_config))
for lp in self.layer_profiles:
table.append(lp.generate_table_row(table_config))
table.append(self._generate_table_total(table_config))
return table
def generate_summary(
self, include_macs: bool = True
) -> Sequence[Sequence[Union[str, float]]]:
summary = [
["Total params", _number_as_readable_str(self.weight_count())],
[
"Trainable params",
_number_as_readable_str(self.weight_count(trainable=True)),
],
[
"Non-trainable params",
_number_as_readable_str(self.weight_count(trainable=False)),
],
["Model size", memory_as_readable_str(self.memory)],
[
"Model size (8-bit FP weights)",
memory_as_readable_str(self.int8_fp_weights_memory),
],
["Float-32 Equivalent", memory_as_readable_str(self.fp_equivalent_memory)],
[
"Compression Ratio of Memory",
self.memory / max(1e-8, self.fp_equivalent_memory),
],
]
if include_macs:
binarization_ratio = self.op_count("mac", 1) / max(
1, self.op_count(op_type="mac")
)
ternarization_ratio = self.op_count("mac", 2) / max(
1, self.op_count(op_type="mac")
)
summary.append(
[
"Number of MACs",
_number_as_readable_str(self.op_count(op_type="mac")),
]
)
if binarization_ratio > 0:
summary.append(
["Ratio of MACs that are binarized", f"{binarization_ratio:.4f}"]
)
if ternarization_ratio > 0:
summary.append(
["Ratio of MACs that are ternarized", f"{ternarization_ratio:.4f}"]
)
return summary
def sanitize_table(table_data: Sequence[Sequence[Any]]) -> Sequence[Sequence[str]]:
return [
[f"{v:.2f}" if type(v) == float else str(v) for v in row] for row in table_data
]
class LayersTable(AsciiTable):
def __init__(self, table_data, title=None):
super().__init__(sanitize_table(table_data), title=title)
self.inner_column_border = False
self.justify_columns = {
i: "left" if i == 0 else "right" for i in range(len(table_data[0]))
}
self.inner_footing_row_border = True
self.inner_heading_row_border = True
class SummaryTable(AsciiTable):
def __init__(self, table_data, title=None):
super().__init__(sanitize_table(table_data), title=title)
self.inner_column_border = False
self.inner_heading_row_border = False
def summary(
model: tf.keras.models.Model,
print_fn: Optional[Callable[[str], Any]] = None,
include_macs: bool = True,
) -> None:
"""Prints a string summary of the network.
The summary includes the following information per layer:
- input precision,
- output dimension,
    - weight count (broken down by bitwidth),
- memory footprint in kilobytes (`8*1024` 1-bit weights = 1 kB),
    - number of multiply-accumulate (MAC) operations broken down by precision (*optional & experimental*).
A single MAC operation contains both a multiplication and an addition. The precision
of a MAC operation is defined as the maximum bitwidth of its inputs.
Additionally, the following overall statistics for the model are supplied:
- total number of weights,
- total number of trainable weights,
- total number of non-trainable weights,
- model size,
- model size (8-bit FP weights): memory footprint if FP weights were 8 bit,
- float-32 equivalent size: memory footprint if all weights were 32 bit,
- compression ratio achieved by quantizing weights,
- total number of MAC operations,
    - ratio of MAC operations that are binarized and can be accelerated with XNOR-gates.
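    !!! example
        A minimal sketch; the single-layer model below is illustrative:
        ```python
        import tensorflow as tf
        import larq as lq
        model = tf.keras.Sequential(
            [lq.layers.QuantDense(10, kernel_quantizer="ste_sign", input_shape=(32,))]
        )
        lq.models.summary(model)
        ```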
# Arguments
model: model instance.
print_fn: Print function to use. Defaults to `print`. You can set it to a custom
function in order to capture the string summary.
include_macs: whether or not to include the number of MAC-operations in the
summary.
# Raises
ValueError: if called before the model is built.
"""
if not model.built:
raise ValueError(
"This model has not yet been built. Build the model first by calling "
"`model.build()` or calling `model.fit()` with some data, or specify an "
"`input_shape` argument in the first layer(s) for automatic build."
)
if not print_fn:
print_fn = print
model_profile = ModelProfile(model)
print_fn(
LayersTable(model_profile.generate_table(), title=f"{model.name} stats").table
)
print_fn(
SummaryTable(
model_profile.generate_summary(include_macs), title=f"{model.name} summary"
).table
)
| 16,824 | 31.861328 | 105 | py |
larq | larq-main/larq/metrics.py | """We add metrics specific to extremely quantized networks using a
`larq.context.metrics_scope` rather than through the `metrics` parameter of
`model.compile()`, where most common metrics reside. This is because, to calculate
metrics like the `flip_ratio`, we need a layer's kernel or activation and not just the
`y_true` and `y_pred` that Keras passes to metrics defined in the usual way.
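!!! example
    A usage sketch mirroring the test suite; the layer and shapes are
    illustrative:
    ```python
    import tensorflow as tf
    import larq as lq
    with lq.context.metrics_scope(["flip_ratio"]):
        model = tf.keras.Sequential(
            [lq.layers.QuantDense(3, kernel_quantizer="ste_sign", input_shape=(32,))]
        )
    model.compile(loss="mse", optimizer="sgd")
    ```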
"""
import numpy as np
import tensorflow as tf
from larq import utils
@utils.register_alias("flip_ratio")
@utils.register_keras_custom_object
class FlipRatio(tf.keras.metrics.Metric):
"""Computes the mean ratio of changed values in a given tensor.
!!! example
```python
m = metrics.FlipRatio()
m.update_state((1, 1)) # result: 0
m.update_state((2, 2)) # result: 1
m.update_state((1, 2)) # result: 0.75
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
# Arguments
name: Name of the metric.
values_dtype: Data type of the tensor for which to track changes.
dtype: Data type of the moving mean.
"""
def __init__(self, values_dtype="int8", name="flip_ratio", dtype=None):
super().__init__(name=name, dtype=dtype)
self.built = False
self.values_dtype = tf.as_dtype(values_dtype)
def build(self, input_shape):
self._previous_values = self.add_weight(
"previous_values",
shape=input_shape,
dtype=self.values_dtype,
initializer=tf.keras.initializers.zeros,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
self.total = self.add_weight(
"total",
initializer=tf.keras.initializers.zeros,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
self.count = self.add_weight(
"count",
initializer=tf.keras.initializers.zeros,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
)
self._size = tf.cast(np.prod(input_shape), self.dtype)
self.built = True
def update_state(self, values, sample_weight=None):
values = tf.cast(values, self.values_dtype)
if not self.built:
with tf.name_scope(self.name), tf.init_scope():
self.build(values.shape)
unchanged_values = tf.math.count_nonzero(
tf.equal(self._previous_values, values)
)
flip_ratio = 1 - (
tf.cast(unchanged_values, self.dtype) / tf.cast(self._size, self.dtype)
)
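        # On the first call `self.count` is still 0, so `tf.sign(self.count)`
        # zeroes out the contribution below: the comparison against the
        # zero-initialized `_previous_values` is excluded, and `result()`
        # divides by `count - 1` accordingly.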
update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))
with tf.control_dependencies([update_total_op]):
update_count_op = self.count.assign_add(1)
with tf.control_dependencies([update_count_op]):
return self._previous_values.assign(values)
def result(self):
return tf.compat.v1.div_no_nan(self.total, self.count - 1)
def reset_state(self):
tf.keras.backend.batch_set_value(
[(v, 0) for v in self.variables if v is not self._previous_values]
)
def reset_states(self):
self.reset_state() # For backwards compatibility with < 2.5
def get_config(self):
return {**super().get_config(), "values_dtype": self.values_dtype.name}
| 3,341 | 34.935484 | 86 | py |
larq | larq-main/larq/activations.py | """Activations can either be used through an `Activation` layer, or through the
`activation` argument supported by all forward layers:
```python
import tensorflow as tf
import larq as lq
model.add(lq.layers.QuantDense(64))
model.add(tf.keras.layers.Activation('hard_tanh'))
```
This is equivalent to:
```python
model.add(lq.layers.QuantDense(64, activation='hard_tanh'))
```
You can also pass an element-wise TensorFlow function as an activation:
```python
model.add(lq.layers.QuantDense(64, activation=lq.activations.hard_tanh))
```
"""
import tensorflow as tf
from larq import utils
@utils.register_keras_custom_object
def hard_tanh(x: tf.Tensor) -> tf.Tensor:
"""Hard tanh activation function.
```plot-activation
activations.hard_tanh
```
# Arguments
x: Input tensor.
# Returns
Hard tanh activation.
"""
return tf.clip_by_value(x, -1, 1)
@utils.register_keras_custom_object
def leaky_tanh(x: tf.Tensor, alpha: float = 0.2) -> tf.Tensor:
r"""Leaky tanh activation function.
Similar to hard tanh, but with non-zero slopes as in leaky ReLU.
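    Written out, the implementation below computes
    $$
    \mathrm{leaky\_tanh}(x) = \mathrm{clip}(x, -1, 1)
    + \alpha \, (\max(x, 1) - 1) + \alpha \, (\min(x, -1) + 1)
    $$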
```plot-activation
activations.leaky_tanh
```
# Arguments
x: Input tensor.
alpha: Slope of the activation function outside of [-1, 1].
# Returns
Leaky tanh activation.
"""
return (
tf.clip_by_value(x, -1, 1)
+ (tf.math.maximum(x, 1) - 1) * alpha
+ (tf.math.minimum(x, -1) + 1) * alpha
)
| 1,481 | 21.119403 | 79 | py |
larq | larq-main/larq/__init__.py | from larq import ( # pytype: disable=pyi-error
activations,
callbacks,
constraints,
context,
layers,
math,
metrics,
models,
optimizers,
quantizers,
utils,
)
try:
from importlib import metadata # type: ignore
except ImportError:
# Running on pre-3.8 Python; use importlib-metadata package
import importlib_metadata as metadata # type: ignore
__version__ = metadata.version("larq")
__all__ = [
"layers",
"activations",
"callbacks",
"constraints",
"context",
"math",
"metrics",
"models",
"quantizers",
"optimizers",
"utils",
]
| 630 | 16.527778 | 63 | py |
larq | larq-main/larq/quantizers_test.py | import functools
import numpy as np
import pytest
import tensorflow as tf
from packaging import version
import larq as lq
from larq import testing_utils
class DummyTrainableQuantizer(tf.keras.layers.Layer):
"""Used to test whether we can set layers as quantizers without any throws."""
_custom_metrics = None
def build(self, input_shape):
self.dummy_weight = self.add_weight("dummy_weight", trainable=True)
super().build(input_shape)
def call(self, inputs):
return self.dummy_weight * inputs
class TestCommonFunctionality:
"""Test functionality common to all quantizers, like serialization and usage."""
@pytest.mark.parametrize("module", [lq.quantizers, tf.keras.activations])
@pytest.mark.parametrize(
"name,ref_cls",
[
("ste_sign", lq.quantizers.SteSign),
("approx_sign", lq.quantizers.ApproxSign),
("ste_heaviside", lq.quantizers.SteHeaviside),
("magnitude_aware_sign", lq.quantizers.MagnitudeAwareSign),
("swish_sign", lq.quantizers.SwishSign),
("ste_tern", lq.quantizers.SteTern),
],
)
def test_serialization(self, module, name, ref_cls):
if module == tf.keras.activations and (
version.parse(tf.__version__) < version.parse("2.13")
):
            # New serialization in Keras doesn't support using quantizer strings as activations
fn = module.get(name)
assert fn.__class__ == ref_cls
fn = module.get(ref_cls())
assert fn.__class__ == ref_cls
assert type(fn.precision) == int
if module == tf.keras.activations and (
version.parse(tf.__version__) < version.parse("1.15")
):
pytest.skip(
"TensorFlow < 1.15 does not support Quantizer classes as activations"
)
config = module.serialize(fn)
fn = module.deserialize(config)
assert fn.__class__ == ref_cls
assert type(fn.precision) == int
def test_noop_serialization(self):
fn = lq.quantizers.get(lq.quantizers.NoOp(precision=1))
assert fn.__class__ == lq.quantizers.NoOp
assert fn.precision == 1
config = lq.quantizers.serialize(fn)
fn = lq.quantizers.deserialize(config)
assert fn.__class__ == lq.quantizers.NoOp
assert fn.precision == 1
def test_invalid_usage(self):
with pytest.raises(ValueError):
lq.quantizers.get(42)
with pytest.raises(ValueError):
lq.quantizers.get("unknown")
with pytest.raises(ValueError):
lq.quantizers.DoReFa(k_bit=2, mode="unknown")
f = lq.quantizers.DoReFa(k_bit=2, mode="activations")
f.mode = "unknown"
with pytest.raises(ValueError):
f.call([0.0])
@pytest.mark.parametrize("quantizer", ["input_quantizer", "kernel_quantizer"])
def test_layer_as_quantizer(self, quantizer, keras_should_run_eagerly):
"""Test whether a keras.layers.Layer can be used as quantizer."""
input_data = testing_utils.random_input((1, 10))
model = tf.keras.Sequential(
[lq.layers.QuantDense(1, **{quantizer: DummyTrainableQuantizer()})]
)
model.compile(optimizer="sgd", loss="mse", run_eagerly=keras_should_run_eagerly)
model.fit(input_data, np.ones((1,)), epochs=1)
assert any(["dummy_weight" in var.name for var in model.trainable_variables])
class TestQuantization:
"""Test binarization and ternarization."""
@pytest.mark.parametrize(
"fn",
[
"ste_sign",
lq.quantizers.SteSign(),
"approx_sign",
lq.quantizers.ApproxSign(),
"swish_sign",
lq.quantizers.SwishSign(),
],
)
def test_xnor_binarization(self, fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
binarized_values = np.random.choice([-1, 1], size=(2, 5))
result = f([binarized_values])[0]
np.testing.assert_allclose(result, binarized_values)
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
assert not np.any(result == 0)
assert np.all(result[real_values < 0] == -1)
assert np.all(result[real_values >= 0] == 1)
zero_values = np.zeros((2, 5))
result = f([zero_values])[0]
assert np.all(result == 1)
@pytest.mark.parametrize("fn", ["ste_heaviside", lq.quantizers.SteHeaviside()])
def test_and_binarization(self, fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
binarized_values = np.random.choice([0, 1], size=(2, 5))
result = f([binarized_values])[0]
np.testing.assert_allclose(result, binarized_values)
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
assert np.all(result[real_values <= 0] == 0)
assert np.all(result[real_values > 0] == 1)
@pytest.mark.usefixtures("eager_mode")
def test_magnitude_aware_sign_binarization(self):
a = np.random.uniform(-2, 2, (3, 2, 2, 3))
x = tf.Variable(a)
y = lq.quantizers.MagnitudeAwareSign()(x)
assert y.shape == x.shape
# check sign
np.testing.assert_allclose(tf.sign(y).numpy(), np.sign(a))
# check magnitude
np.testing.assert_allclose(
tf.reduce_mean(tf.abs(y), axis=[0, 1, 2]).numpy(),
[np.mean(np.reshape(np.abs(a[:, :, :, i]), [-1])) for i in range(3)],
)
@pytest.mark.parametrize(
"fn",
[
"ste_tern",
lq.quantizers.SteTern(),
lq.quantizers.SteTern(ternary_weight_networks=True),
lq.quantizers.SteTern(threshold_value=np.random.uniform(0.01, 0.8)),
],
)
def test_ternarization_basic(self, fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
ternarized_values = np.random.choice([-1, 0, 1], size=(4, 10))
result = f([ternarized_values])[0]
np.testing.assert_allclose(result, ternarized_values)
assert not np.any(result > 1)
assert not np.any(result < -1)
assert np.any(result == -1)
assert np.any(result == 1)
assert np.any(result == 0)
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
assert not np.any(result > 1)
assert not np.any(result < -1)
assert np.any(result == -1)
assert np.any(result == 1)
assert np.any(result == 0)
@pytest.mark.parametrize("fn", ["ste_tern", lq.quantizers.SteTern()])
def test_ternarization_with_default_threshold(self, fn):
x = tf.keras.backend.placeholder(ndim=2)
test_threshold = 0.05 # This is the default
f = tf.keras.backend.function([x], [lq.quantizers.get(fn)(x)])
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
assert np.all(result[real_values > test_threshold] == 1)
assert np.all(result[real_values < -test_threshold] == -1)
assert np.all(result[np.abs(real_values) < test_threshold] == 0)
assert not np.any(result > 1)
assert not np.any(result < -1)
def test_ternarization_with_custom_threshold(self):
x = tf.keras.backend.placeholder(ndim=2)
test_threshold = np.random.uniform(0.01, 0.8)
fn = lq.quantizers.SteTern(threshold_value=test_threshold)
f = tf.keras.backend.function([x], [fn(x)])
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
assert np.all(result[real_values > test_threshold] == 1)
assert np.all(result[real_values < -test_threshold] == -1)
assert np.all(result[np.abs(real_values) < test_threshold] == 0)
assert not np.any(result > 1)
assert not np.any(result < -1)
def test_ternarization_with_ternary_weight_networks(self):
x = tf.keras.backend.placeholder(ndim=2)
real_values = testing_utils.generate_real_values_with_zeros()
test_threshold = 0.7 * np.sum(np.abs(real_values)) / np.size(real_values)
fn = lq.quantizers.SteTern(ternary_weight_networks=True)
f = tf.keras.backend.function([x], [fn(x)])
result = f([real_values])[0]
assert np.all(result[real_values > test_threshold] == 1)
assert np.all(result[real_values < -test_threshold] == -1)
assert np.all(result[np.abs(real_values) < test_threshold] == 0)
assert not np.any(result > 1)
assert not np.any(result < -1)
@pytest.mark.parametrize("k_bit", [1, 2, 4, 6, 8])
@pytest.mark.parametrize("mode", ["activations", "weights"])
def test_dorefa_quantize(self, k_bit, mode):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [lq.quantizers.DoReFa(k_bit, mode)(x)])
real_values = testing_utils.generate_real_values_with_zeros()
result = f([real_values])[0]
n = 2**k_bit - 1
if mode == "weights":
# Create the preprocessed and scaled stimulus, which is then ready to
            # go through the same test as for the activation quantizer
divider = np.amax(np.abs(np.tanh(real_values)))
real_values = np.tanh(real_values) / divider
real_values = (real_values / 2.0) + 0.5
# The results, which are currently on [-1, 1] range get the same
# scaling, so they behave like they were created on the activation
# range and can be tested like that
result = result / 2.0 + 0.5
assert not np.any(result > 1)
assert not np.any(result < 0)
for i in range(n + 1):
np.testing.assert_allclose(
result[
(real_values > (2 * i - 1) / (2 * n))
& (real_values < (2 * i + 1) / (2 * n))
],
i / n,
atol=1e-6,
)
@pytest.mark.usefixtures("eager_mode")
class TestGradients:
"""Test gradients for different quantizers."""
@pytest.mark.parametrize(
"fn",
[
lq.quantizers.SteSign(clip_value=None),
lq.quantizers.SteTern(clip_value=None),
lq.quantizers.SteHeaviside(clip_value=None),
],
)
def test_identity_ste_grad(self, fn):
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
activation = fn(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), np.ones_like(x))
@pytest.mark.parametrize(
"fn",
[
lq.quantizers.SteSign(),
lq.quantizers.SteTern(),
lq.quantizers.SteHeaviside(),
],
)
def test_ste_grad(self, fn):
@np.vectorize
def ste_grad(x):
if np.abs(x) <= 1:
return 1.0
return 0.0
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
activation = fn(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), ste_grad(x))
    # Test with default and non-default beta values
def test_swish_grad(self):
def swish_grad(x, beta):
return (
beta * (2 - beta * x * np.tanh(beta * x / 2)) / (1 + np.cosh(beta * x))
)
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
activation = lq.quantizers.SwishSign()(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=5.0))
with tf.GradientTape() as tape:
activation = lq.quantizers.SwishSign(beta=10.0)(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=10.0))
def test_approx_sign_grad(self):
@np.vectorize
def approx_sign_grad(x):
if np.abs(x) <= 1:
return 2 - 2 * np.abs(x)
return 0.0
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
activation = lq.quantizers.ApproxSign()(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), approx_sign_grad(x))
def test_magnitude_aware_sign_grad(self):
a = np.random.uniform(-2, 2, (3, 2, 2, 3))
x = tf.Variable(a)
with tf.GradientTape() as tape:
y = lq.quantizers.MagnitudeAwareSign()(x)
grad = tape.gradient(y, x)
scale_vector = [
np.mean(np.reshape(np.abs(a[:, :, :, i]), [-1])) for i in range(3)
]
np.testing.assert_allclose(
grad.numpy(), np.where(abs(a) < 1, np.ones(a.shape) * scale_vector, 0)
)
@pytest.mark.parametrize("mode", ["activations", "weights"])
def test_dorefa_ste_grad(self, mode):
@np.vectorize
def ste_grad(x):
            if 0 <= x <= 1:
return 1.0
return 0.0
        def tanh_grad(x):
            # 1 / cosh(x)**2 is the derivative of tanh. The gradients of the
            # scaling operations cancel each other, and the gradient of the
            # quantize function is expected to be 1 everywhere because it is
            # only used on its linear region; tanh does all the limiting.
            divider = np.amax(np.abs(np.tanh(x)))
            return 1 / (np.cosh(x) ** 2.0) / divider
expected_gradient = ste_grad if mode == "activations" else tanh_grad
x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
tf_x = tf.Variable(x)
with tf.GradientTape() as tape:
activation = lq.quantizers.DoReFa(2, mode)(tf_x)
grad = tape.gradient(activation, tf_x)
np.testing.assert_allclose(grad.numpy(), expected_gradient(x))
@pytest.mark.parametrize(
"quantizer",
[
("ste_sign", lq.quantizers.SteSign),
("approx_sign", lq.quantizers.ApproxSign),
("ste_heaviside", lq.quantizers.SteHeaviside),
("swish_sign", lq.quantizers.SwishSign),
("magnitude_aware_sign", lq.quantizers.MagnitudeAwareSign),
("ste_tern", lq.quantizers.SteTern),
("dorefa_quantizer", lq.quantizers.DoReFa),
("dorefa_quantizer", functools.partial(lq.quantizers.DoReFa, mode="weights")),
],
)
def test_metrics(quantizer):
quantizer_str, quantizer_cls = quantizer
# No metric
model = tf.keras.models.Sequential(
[lq.layers.QuantDense(3, kernel_quantizer=quantizer_str, input_shape=(32,))]
)
model.compile(loss="mse", optimizer="sgd")
assert len(model.layers[0]._metrics) == 0
# Metric added using scope
with lq.context.metrics_scope(["flip_ratio"]):
model = tf.keras.models.Sequential(
[lq.layers.QuantDense(3, kernel_quantizer=quantizer_str, input_shape=(32,))]
)
model.compile(loss="mse", optimizer="sgd")
if version.parse(tf.__version__) > version.parse("1.14"):
assert len(model.layers[0].kernel_quantizer._metrics) == 1
else:
        # In TF 1.14, call() gets called twice, resulting in an extra initial
        # copy of the metrics.
assert len(model.layers[0].kernel_quantizer._metrics) == 2
# Metric added explicitly to quantizer
model = tf.keras.models.Sequential(
[
lq.layers.QuantDense(
3,
kernel_quantizer=quantizer_cls(metrics=["flip_ratio"]),
input_shape=(32,),
)
]
)
model.compile(loss="mse", optimizer="sgd")
if version.parse(tf.__version__) > version.parse("1.14"):
assert len(model.layers[0].kernel_quantizer._metrics) == 1
else:
        # In TF 1.14, call() gets called twice, resulting in an extra initial
        # copy of the metrics.
assert len(model.layers[0].kernel_quantizer._metrics) == 2
def test_get_kernel_quantizer_assigns_metrics():
with lq.context.metrics_scope(["flip_ratio"]):
ste_sign = lq.quantizers.get_kernel_quantizer("ste_sign")
assert "flip_ratio" in lq.context.get_training_metrics()
assert isinstance(ste_sign, lq.quantizers.SteSign)
assert "flip_ratio" in ste_sign._custom_metrics
def test_get_kernel_quantizer_accepts_function():
custom_quantizer = lq.quantizers.get_kernel_quantizer(lambda x: x)
assert callable(custom_quantizer)
assert not hasattr(custom_quantizer, "_custom_metrics")
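# Hedged usage sketch based on the two tests above: get_kernel_quantizer
# resolves string identifiers to quantizer instances and passes callables
# through untouched, e.g.
#   lq.quantizers.get_kernel_quantizer("ste_sign")   # -> SteSign instance
#   lq.quantizers.get_kernel_quantizer(lambda x: x)  # -> the lambda itself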
def test_backwards_compat_aliases():
assert lq.quantizers.DoReFaQuantizer == lq.quantizers.DoReFa
assert lq.quantizers.NoOpQuantizer == lq.quantizers.NoOp
| 17,283 | 37.238938 | 96 | py |
larq | larq-main/larq/utils_test.py | from larq import utils
def test_memory_as_readable_str():
    correct_strings = [  # 2^i bits, from i = 0 to 43
"0.12 B",
"0.25 B",
"0.50 B",
"1.00 B",
"2.00 B",
"4.00 B",
"8.00 B",
"16.00 B",
"32.00 B",
"64.00 B",
"128.00 B",
"256.00 B",
"512.00 B",
"1.00 KiB",
"2.00 KiB",
"4.00 KiB",
"8.00 KiB",
"16.00 KiB",
"32.00 KiB",
"64.00 KiB",
"128.00 KiB",
"256.00 KiB",
"512.00 KiB",
"1.00 MiB",
"2.00 MiB",
"4.00 MiB",
"8.00 MiB",
"16.00 MiB",
"32.00 MiB",
"64.00 MiB",
"128.00 MiB",
"256.00 MiB",
"512.00 MiB",
"1.00 GiB",
"2.00 GiB",
"4.00 GiB",
"8.00 GiB",
"16.00 GiB",
"32.00 GiB",
"64.00 GiB",
"128.00 GiB",
"256.00 GiB",
"512.00 GiB",
"1,024.00 GiB",
]
for i, correct_string in enumerate(correct_strings):
assert utils.memory_as_readable_str(2**i) == correct_string
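# Worked examples for the mapping exercised above (a sketch; unit boundaries
# are read off the expected-string table): memory_as_readable_str takes a size
# in bits and renders it in byte-based units, e.g.
#   2**3  bits = 1 B       -> "1.00 B"
#   2**13 bits = 1 KiB     -> "1.00 KiB"
#   2**43 bits = 1024 GiB  -> "1,024.00 GiB"  (GiB is the largest unit used)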
def test_set_precision():
@utils.set_precision(8)
def toy_quantizer(x):
return x
assert toy_quantizer.precision == 8
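# A minimal sketch of how a decorator with this behavior could be implemented
# (an assumption for illustration; larq's actual implementation may differ):
def _example_set_precision(precision):
    def decorator(fn):
        fn.precision = precision  # attach metadata without altering the call
        return fn
    return decorator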
| 1,280 | 19.66129 | 67 | py |