Columns: id (int32) | repo | path | func_name | original_string | language (python) | code | code_tokens | docstring | docstring_tokens | sha | url. Since `original_string` duplicates `code` verbatim and the `*_tokens` columns are mechanical token splits of `code` and `docstring`, each record below lists the distinct fields: id, repo, path, func_name, language, code, docstring, sha, and url.
id: 25,700 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: find_undeclared | language: python

```python
def find_undeclared(nodes, names):
    """Check if the names passed are accessed undeclared. The return value
    is a set of all the undeclared names from the sequence of names found.
    """
    visitor = UndeclaredNameVisitor(names)
    try:
        for node in nodes:
            visitor.visit(node)
    except VisitorExit:
        pass
    return visitor.undeclared
```

docstring: Check if the names passed are accessed undeclared. The return value is a set of all the undeclared names from the sequence of names found.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L108-L118
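For context, a short usage sketch of the helper above. `find_undeclared` is an internal Jinja2 helper rather than public API, so the import path and the exact behavior on edge cases are assumptions here:

```python
# Hypothetical usage sketch: find_undeclared is internal to Jinja2's compiler.
from jinja2 import Environment
from jinja2.compiler import find_undeclared

env = Environment()
# `greeting` is assigned before use; `user` is read without being declared.
ast = env.parse('{% set greeting = "hi" %}{{ greeting }} {{ user }}')
# Ask which of the given names are accessed before being assigned.
print(find_undeclared(ast.body, ('greeting', 'user')))  # expected: {'user'}
```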
id: 25,701 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: Frame.inner | language: python

```python
def inner(self, isolated=False):
    """Return an inner frame."""
    if isolated:
        return Frame(self.eval_ctx, level=self.symbols.level + 1)
    return Frame(self.eval_ctx, self)
```

docstring: Return an inner frame.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L172-L176
id: 25,702 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.buffer | language: python

```python
def buffer(self, frame):
    """Enable buffering for the frame from that point onwards."""
    frame.buffer = self.temporary_identifier()
    self.writeline('%s = []' % frame.buffer)
```

docstring: Enable buffering for the frame from that point onwards.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L322-L325
id: 25,703 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.return_buffer_contents | language: python

```python
def return_buffer_contents(self, frame, force_unescaped=False):
    """Return the buffer contents of the frame."""
    if not force_unescaped:
        if frame.eval_ctx.volatile:
            self.writeline('if context.eval_ctx.autoescape:')
            self.indent()
            self.writeline('return Markup(concat(%s))' % frame.buffer)
            self.outdent()
            self.writeline('else:')
            self.indent()
            self.writeline('return concat(%s)' % frame.buffer)
            self.outdent()
            return
        elif frame.eval_ctx.autoescape:
            self.writeline('return Markup(concat(%s))' % frame.buffer)
            return
    self.writeline('return concat(%s)' % frame.buffer)
```

docstring: Return the buffer contents of the frame.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L327-L343
id: 25,704 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.start_write | language: python

```python
def start_write(self, frame, node=None):
    """Yield or write into the frame buffer."""
    if frame.buffer is None:
        self.writeline('yield ', node)
    else:
        self.writeline('%s.append(' % frame.buffer, node)
```

docstring: Yield or write into the frame buffer.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L353-L358
id: 25,705 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.simple_write | language: python

```python
def simple_write(self, s, frame, node=None):
    """Simple shortcut for start_write + write + end_write."""
    self.start_write(frame, node)
    self.write(s)
    self.end_write(frame)
```

docstring: Simple shortcut for start_write + write + end_write.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L365-L369
id: 25,706 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.write | language: python

```python
def write(self, x):
    """Write a string into the output stream."""
    if self._new_lines:
        if not self._first_write:
            self.stream.write('\n' * self._new_lines)
            self.code_lineno += self._new_lines
            if self._write_debug_info is not None:
                self.debug_info.append((self._write_debug_info,
                                        self.code_lineno))
                self._write_debug_info = None
        self._first_write = False
        self.stream.write('    ' * self._indentation)
        self._new_lines = 0
    self.stream.write(x)
```

docstring: Write a string into the output stream.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L382-L395
id: 25,707 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.writeline | language: python

```python
def writeline(self, x, node=None, extra=0):
    """Combination of newline and write."""
    self.newline(node, extra)
    self.write(x)
```

docstring: Combination of newline and write.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L397-L400
id: 25,708 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.newline | language: python

```python
def newline(self, node=None, extra=0):
    """Add one or more newlines before the next write."""
    self._new_lines = max(self._new_lines, 1 + extra)
    if node is not None and node.lineno != self._last_line:
        self._write_debug_info = node.lineno
        self._last_line = node.lineno
```

docstring: Add one or more newlines before the next write.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L402-L407
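The three methods above (`write`, `writeline`, `newline`) form a small lazy-newline writer: newlines are only requested, then flushed together with the pending indentation on the next `write`. A minimal standalone sketch of that pattern (a simplification, not the actual Jinja2 class):

```python
import io


class TinyWriter:
    """Minimal sketch of the lazy-newline pattern used by CodeGenerator."""

    def __init__(self):
        self.stream = io.StringIO()
        self._new_lines = 0
        self._first_write = True
        self._indentation = 0

    def newline(self, extra=0):
        # Only record that newlines are wanted; flush them later.
        self._new_lines = max(self._new_lines, 1 + extra)

    def write(self, x):
        if self._new_lines:
            if not self._first_write:
                self.stream.write('\n' * self._new_lines)
            self._first_write = False
            self.stream.write('    ' * self._indentation)
            self._new_lines = 0
        self.stream.write(x)

    def writeline(self, x, extra=0):
        self.newline(extra)
        self.write(x)


w = TinyWriter()
w.writeline('def f():')
w._indentation += 1
w.writeline('return 42')
print(w.stream.getvalue())  # def f():\n    return 42
```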
id: 25,709 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.signature | language: python

```python
def signature(self, node, frame, extra_kwargs=None):
    """Writes a function call to the stream for the current node.
    A leading comma is added automatically. The extra keyword
    arguments may not include python keywords otherwise a syntax
    error could occour. The extra keyword arguments should be given
    as python dict.
    """
    # if any of the given keyword arguments is a python keyword
    # we have to make sure that no invalid call is created.
    kwarg_workaround = False
    for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
        if is_python_keyword(kwarg):
            kwarg_workaround = True
            break

    for arg in node.args:
        self.write(', ')
        self.visit(arg, frame)

    if not kwarg_workaround:
        for kwarg in node.kwargs:
            self.write(', ')
            self.visit(kwarg, frame)
        if extra_kwargs is not None:
            for key, value in iteritems(extra_kwargs):
                self.write(', %s=%s' % (key, value))
    if node.dyn_args:
        self.write(', *')
        self.visit(node.dyn_args, frame)

    if kwarg_workaround:
        if node.dyn_kwargs is not None:
            self.write(', **dict({')
        else:
            self.write(', **{')
        for kwarg in node.kwargs:
            self.write('%r: ' % kwarg.key)
            self.visit(kwarg.value, frame)
            self.write(', ')
        if extra_kwargs is not None:
            for key, value in iteritems(extra_kwargs):
                self.write('%r: %s, ' % (key, value))
        if node.dyn_kwargs is not None:
            self.write('}, **')
            self.visit(node.dyn_kwargs, frame)
            self.write(')')
        else:
            self.write('}')
    elif node.dyn_kwargs is not None:
        self.write(', **')
        self.visit(node.dyn_kwargs, frame)
```

docstring: Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occour. The extra keyword arguments should be given as python dict.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L409-L460
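The docstring above warns that extra keyword arguments must not be Python keywords; the `kwarg_workaround` branch sidesteps that by emitting a `**{...}` dict instead of `name=value` pairs. The restriction it works around can be seen directly in plain Python:

```python
def tag(**attrs):
    return attrs

# tag(class='btn') would be a SyntaxError because `class` is a keyword,
# which is exactly the invalid call the generated code must avoid.
# Passing the same argument through dict unpacking is legal:
print(tag(**{'class': 'btn', 'id': 'save'}))  # {'class': 'btn', 'id': 'save'}
```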
id: 25,710 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.pull_dependencies | language: python

```python
def pull_dependencies(self, nodes):
    """Pull all the dependencies."""
    visitor = DependencyFinderVisitor()
    for node in nodes:
        visitor.visit(node)
    for dependency in 'filters', 'tests':
        mapping = getattr(self, dependency)
        for name in getattr(visitor, dependency):
            if name not in mapping:
                mapping[name] = self.temporary_identifier()
            self.writeline('%s = environment.%s[%r]' %
                           (mapping[name], dependency, name))
```

docstring: Pull all the dependencies.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L462-L473
id: 25,711 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.position | language: python

```python
def position(self, node):
    """Return a human readable position for the node."""
    rv = 'line %d' % node.lineno
    if self.name is not None:
        rv += ' in ' + repr(self.name)
    return rv
```

docstring: Return a human readable position for the node.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L593-L598
id: 25,712 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.pop_assign_tracking | language: python

```python
def pop_assign_tracking(self, frame):
    """Pops the topmost level for assignment tracking and updates the
    context variables if necessary.
    """
    vars = self._assign_stack.pop()
    if not frame.toplevel or not vars:
        return
    public_names = [x for x in vars if x[:1] != '_']
    if len(vars) == 1:
        name = next(iter(vars))
        ref = frame.symbols.ref(name)
        self.writeline('context.vars[%r] = %s' % (name, ref))
    else:
        self.writeline('context.vars.update({')
        for idx, name in enumerate(vars):
            if idx:
                self.write(', ')
            ref = frame.symbols.ref(name)
            self.write('%r: %s' % (name, ref))
        self.write('})')
    if public_names:
        if len(public_names) == 1:
            self.writeline('context.exported_vars.add(%r)' %
                           public_names[0])
        else:
            self.writeline('context.exported_vars.update((%s))' %
                           ', '.join(imap(repr, public_names)))
```

docstring: Pops the topmost level for assignment tracking and updates the context variables if necessary.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L665-L691
id: 25,713 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.visit_Extends | language: python

```python
def visit_Extends(self, node, frame):
    """Calls the extender."""
    if not frame.toplevel:
        self.fail('cannot use extend from a non top-level scope',
                  node.lineno)

    # if the number of extends statements in general is zero so
    # far, we don't have to add a check if something extended
    # the template before this one.
    if self.extends_so_far > 0:

        # if we have a known extends we just add a template runtime
        # error into the generated code. We could catch that at compile
        # time too, but i welcome it not to confuse users by throwing the
        # same error at different times just "because we can".
        if not self.has_known_extends:
            self.writeline('if parent_template is not None:')
            self.indent()
        self.writeline('raise TemplateRuntimeError(%r)' %
                       'extended multiple times')

        # if we have a known extends already we don't need that code here
        # as we know that the template execution will end here.
        if self.has_known_extends:
            raise CompilerExit()
        else:
            self.outdent()

    self.writeline('parent_template = environment.get_template(', node)
    self.visit(node.template, frame)
    self.write(', %r)' % self.name)
    self.writeline('for name, parent_block in parent_template.'
                   'blocks.%s():' % dict_item_iter)
    self.indent()
    self.writeline('context.blocks.setdefault(name, []).'
                   'append(parent_block)')
    self.outdent()

    # if this extends statement was in the root level we can take
    # advantage of that information and simplify the generated code
    # in the top level from this point onwards
    if frame.rootlevel:
        self.has_known_extends = True

    # and now we have one more
    self.extends_so_far += 1
```

docstring: Calls the extender.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L843-L888
id: 25,714 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.visit_Include | language: python

```python
def visit_Include(self, node, frame):
    """Handles includes."""
    if node.ignore_missing:
        self.writeline('try:')
        self.indent()

    func_name = 'get_or_select_template'
    if isinstance(node.template, nodes.Const):
        if isinstance(node.template.value, string_types):
            func_name = 'get_template'
        elif isinstance(node.template.value, (tuple, list)):
            func_name = 'select_template'
    elif isinstance(node.template, (nodes.Tuple, nodes.List)):
        func_name = 'select_template'

    self.writeline('template = environment.%s(' % func_name, node)
    self.visit(node.template, frame)
    self.write(', %r)' % self.name)
    if node.ignore_missing:
        self.outdent()
        self.writeline('except TemplateNotFound:')
        self.indent()
        self.writeline('pass')
        self.outdent()
        self.writeline('else:')
        self.indent()

    skip_event_yield = False
    if node.with_context:
        loop = self.environment.is_async and 'async for' or 'for'
        self.writeline('%s event in template.root_render_func('
                       'template.new_context(context.get_all(), True, '
                       '%s)):' % (loop, self.dump_local_context(frame)))
    elif self.environment.is_async:
        self.writeline('for event in (await '
                       'template._get_default_module_async())'
                       '._body_stream:')
    else:
        if supports_yield_from:
            self.writeline('yield from template._get_default_module()'
                           '._body_stream')
            skip_event_yield = True
        else:
            self.writeline('for event in template._get_default_module()'
                           '._body_stream:')

    if not skip_event_yield:
        self.indent()
        self.simple_write('event', frame)
        self.outdent()

    if node.ignore_missing:
        self.outdent()
```

docstring: Handles includes.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L890-L942
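To see what code paths like the one above actually emit, Jinja2 can return the generated Python source instead of compiling it. A small sketch; `raw=True` is a real parameter of `Environment.compile`, but treat the exact output text as version-dependent:

```python
from jinja2 import Environment

env = Environment()
# raw=True returns the generated Python module source as a string.
src = env.compile('{% include "header.html" %}', raw=True)
print(src)  # look for the environment.get_template(...) call emitted above
```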
id: 25,715 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/compiler.py | func_name: CodeGenerator.visit_FromImport | language: python

```python
def visit_FromImport(self, node, frame):
    """Visit named imports."""
    self.newline(node)
    self.write('included_template = %senvironment.get_template('
               % (self.environment.is_async and 'await ' or ''))
    self.visit(node.template, frame)
    self.write(', %r).' % self.name)
    if node.with_context:
        self.write('make_module%s(context.get_all(), True, %s)'
                   % (self.environment.is_async and '_async' or '',
                      self.dump_local_context(frame)))
    elif self.environment.is_async:
        self.write('_get_default_module_async()')
    else:
        self.write('_get_default_module()')

    var_names = []
    discarded_names = []
    for name in node.names:
        if isinstance(name, tuple):
            name, alias = name
        else:
            alias = name
        self.writeline('%s = getattr(included_template, '
                       '%r, missing)' % (frame.symbols.ref(alias), name))
        self.writeline('if %s is missing:' % frame.symbols.ref(alias))
        self.indent()
        self.writeline('%s = undefined(%r %% '
                       'included_template.__name__, '
                       'name=%r)' %
                       (frame.symbols.ref(alias),
                        'the template %%r (imported on %s) does '
                        'not export the requested name %s' % (
                            self.position(node),
                            repr(name)
                        ), name))
        self.outdent()
        if frame.toplevel:
            var_names.append(alias)
            if not alias.startswith('_'):
                discarded_names.append(alias)

    if var_names:
        if len(var_names) == 1:
            name = var_names[0]
            self.writeline('context.vars[%r] = %s' %
                           (name, frame.symbols.ref(name)))
        else:
            self.writeline('context.vars.update({%s})' % ', '.join(
                '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
            ))
    if discarded_names:
        if len(discarded_names) == 1:
            self.writeline('context.exported_vars.discard(%r)' %
                           discarded_names[0])
        else:
            self.writeline('context.exported_vars.difference_'
                           'update((%s))' % ', '.join(imap(repr, discarded_names)))
```

docstring: Visit named imports.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/compiler.py#L965-L1022
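A quick way to exercise the from-import path above is a template-level macro import; a sketch using a simple in-memory loader:

```python
from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    'macros.html': '{% macro hello(name) %}Hello {{ name }}!{% endmacro %}',
    'page.html': '{% from "macros.html" import hello %}{{ hello("world") }}',
}))
print(env.get_template('page.html').render())  # Hello world!
```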
id: 25,716 | repo: pypa/pipenv | path: pipenv/vendor/backports/weakref.py | func_name: finalize.atexit | language: python

```python
def atexit(self):
    """Whether finalizer should be called at exit"""
    info = self._registry.get(self)
    return bool(info) and info.atexit
```

docstring: Whether finalizer should be called at exit

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/backports/weakref.py#L91-L94
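The `atexit` flag above belongs to the `finalize` class backported from Python 3's `weakref` module; a short sketch of how it is typically used, shown with the standard-library `weakref.finalize` that the backport mirrors:

```python
import weakref


class Resource:
    pass


r = Resource()
f = weakref.finalize(r, print, 'resource cleaned up')
print(f.atexit)   # True: the finalizer would also run at interpreter exit
f.atexit = False  # opt out of the at-exit call; gc still triggers it
del r             # on CPython the finalizer fires here and prints
```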
id: 25,717 | repo: pypa/pipenv | path: pipenv/patched/notpip/_vendor/html5lib/treebuilders/etree_lxml.py | func_name: tostring | language: python

```python
def tostring(element):
    """Serialize an element and its child nodes to a string"""
    rv = []

    def serializeElement(element):
        if not hasattr(element, "tag"):
            if element.docinfo.internalDTD:
                if element.docinfo.doctype:
                    dtd_str = element.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                rv.append(dtd_str)
            serializeElement(element.getroot())

        elif element.tag == comment_type:
            rv.append("<!--%s-->" % (element.text,))

        else:
            # This is assumed to be an ordinary element
            if not element.attrib:
                rv.append("<%s>" % (element.tag,))
            else:
                attr = " ".join(["%s=\"%s\"" % (name, value)
                                 for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (element.tag, attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (element.tag,))

        if hasattr(element, "tail") and element.tail:
            rv.append(element.tail)

    serializeElement(element)

    return "".join(rv)
```

docstring: Serialize an element and its child nodes to a string

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/treebuilders/etree_lxml.py#L134-L172
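A sketch of the serializer above in use, assuming the standalone `html5lib` package plus `lxml` are installed so the lxml treebuilder is available (the exact round-trip output is an assumption):

```python
import html5lib
from html5lib.treebuilders import etree_lxml

# Parse with the lxml treebuilder; the result carries docinfo for the DTD.
doc = html5lib.parse('<!DOCTYPE html><p class="x">hi<!--c--></p>',
                     treebuilder='lxml')
print(etree_lxml.tostring(doc))
```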
id: 25,718 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/visitor.py | func_name: NodeVisitor.get_visitor | language: python

```python
def get_visitor(self, node):
    """Return the visitor function for this node or `None` if no visitor
    exists for this node. In that case the generic visit function is
    used instead.
    """
    method = 'visit_' + node.__class__.__name__
    return getattr(self, method, None)
```

docstring: Return the visitor function for this node or `None` if no visitor exists for this node. In that case the generic visit function is used instead.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/visitor.py#L26-L32
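The lookup above is the classic `visit_<ClassName>` dispatch; a minimal self-contained sketch of the same pattern, collapsing the None check into a `getattr` default:

```python
class Node:
    pass


class Name(Node):
    pass


class Visitor:
    def visit(self, node):
        # Dispatch on the node's class name, as get_visitor does above.
        method = 'visit_' + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        return 'generic'

    def visit_Name(self, node):
        return 'name node'


v = Visitor()
print(v.visit(Name()))  # name node
print(v.visit(Node()))  # generic
```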
id: 25,719 | repo: pypa/pipenv | path: pipenv/vendor/jinja2/visitor.py | func_name: NodeTransformer.visit_list | language: python

```python
def visit_list(self, node, *args, **kwargs):
    """As transformers may return lists in some places this method
    can be used to enforce a list as return value.
    """
    rv = self.visit(node, *args, **kwargs)
    if not isinstance(rv, list):
        rv = [rv]
    return rv
```

docstring: As transformers may return lists in some places this method can be used to enforce a list as return value.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/visitor.py#L80-L87
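`visit_list` matters because a transformer's `visit_*` method may return either a single node or a list of replacement nodes; normalizing to a list lets callers splice results uniformly. The contract in isolation:

```python
def normalize(rv):
    # Mirror of visit_list's contract: always hand back a list.
    if not isinstance(rv, list):
        rv = [rv]
    return rv


print(normalize('node'))      # ['node']
print(normalize(['a', 'b']))  # ['a', 'b'], unchanged
```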
id: 25,720 | repo: pypa/pipenv | path: pipenv/vendor/pep517/wrappers.py | func_name: Pep517HookCaller.build_wheel | language: python

```python
def build_wheel(
        self, wheel_directory, config_settings=None,
        metadata_directory=None):
    """Build a wheel from this project.

    Returns the name of the newly created file.

    In general, this will call the 'build_wheel' hook in the backend.
    However, if that was previously called by
    'prepare_metadata_for_build_wheel', and the same metadata_directory is
    used, the previously built wheel will be copied to wheel_directory.
    """
    if metadata_directory is not None:
        metadata_directory = abspath(metadata_directory)
    return self._call_hook('build_wheel', {
        'wheel_directory': abspath(wheel_directory),
        'config_settings': config_settings,
        'metadata_directory': metadata_directory,
    })
```

docstring: Build a wheel from this project. Returns the name of the newly created file. In general, this will call the 'build_wheel' hook in the backend. However, if that was previously called by 'prepare_metadata_for_build_wheel', and the same metadata_directory is used, the previously built wheel will be copied to wheel_directory.

sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde | url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pep517/wrappers.py#L89-L107
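A sketch of driving the hook above from a build frontend. The project directory and its backend choice are assumptions for the sketch; the directory must contain a `pyproject.toml` naming that backend:

```python
from pep517.wrappers import Pep517HookCaller

# 'src/myproj' and 'setuptools.build_meta' are hypothetical placeholders.
hooks = Pep517HookCaller('src/myproj', build_backend='setuptools.build_meta')
wheel_name = hooks.build_wheel('dist/', config_settings={})
print('built', wheel_name)
```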
id: 25,721 | repo: Cadene/pretrained-models.pytorch | path: pretrainedmodels/models/fbresnet/resnet152_load.py | func_name: resnet18 | language: python

```python
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
```

docstring: Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet

sha: 021d97897c9aa76ec759deff43d341c4fd45d7ba | url: https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet/resnet152_load.py#L160-L169
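The function above mirrors torchvision's `resnet18` constructor, so the torchvision equivalent (same signature) serves as a usage sketch; note that newer torchvision releases replace `pretrained=True` with a `weights=` argument:

```python
import torch
from torchvision.models import resnet18  # same API as the function above

model = resnet18(pretrained=True)  # downloads ImageNet weights on first use
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])
```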
25,722 | Cadene/pretrained-models.pytorch | pretrainedmodels/models/fbresnet.py | fbresnet152 | def fbresnet152(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['fbresnet152'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model | python | def fbresnet152(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['fbresnet152'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model | [
"def",
"fbresnet152",
"(",
"num_classes",
"=",
"1000",
",",
"pretrained",
"=",
"'imagenet'",
")",
":",
"model",
"=",
"FBResNet",
"(",
"Bottleneck",
",",
"[",
"3",
",",
"8",
",",
"36",
",",
"3",
"]",
",",
"num_classes",
"=",
"num_classes",
")",
"if",
"pretrained",
"is",
"not",
"None",
":",
"settings",
"=",
"pretrained_settings",
"[",
"'fbresnet152'",
"]",
"[",
"pretrained",
"]",
"assert",
"num_classes",
"==",
"settings",
"[",
"'num_classes'",
"]",
",",
"\"num_classes should be {}, but is {}\"",
".",
"format",
"(",
"settings",
"[",
"'num_classes'",
"]",
",",
"num_classes",
")",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"settings",
"[",
"'url'",
"]",
")",
")",
"model",
".",
"input_space",
"=",
"settings",
"[",
"'input_space'",
"]",
"model",
".",
"input_size",
"=",
"settings",
"[",
"'input_size'",
"]",
"model",
".",
"input_range",
"=",
"settings",
"[",
"'input_range'",
"]",
"model",
".",
"mean",
"=",
"settings",
"[",
"'mean'",
"]",
"model",
".",
"std",
"=",
"settings",
"[",
"'std'",
"]",
"return",
"model"
] | Constructs a ResNet-152 model.
Args:
pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet | [
"Constructs",
"a",
"ResNet",
"-",
"152",
"model",
"."
] | 021d97897c9aa76ec759deff43d341c4fd45d7ba | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet.py#L216-L233 |
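The `fbresnet152` record above illustrates the package's convention of attaching preprocessing metadata (`input_space`, `input_size`, `input_range`, `mean`, `std`) directly to the returned model. A minimal hedged sketch of how a caller would consume those attributes — it assumes `pretrainedmodels` and `torchvision` are installed and that `fbresnet152` is exposed at the package top level; the transform pipeline is an illustrative guess, not part of the record:

```python
# Sketch (hedged): using the metadata fbresnet152() attaches to the model.
import torch
import torchvision.transforms as T
import pretrainedmodels

model = pretrainedmodels.fbresnet152(num_classes=1000, pretrained='imagenet')
model.eval()

# input_size is (C, H, W); mean/std come from the settings dict shown above.
preprocess = T.Compose([
    T.Resize(model.input_size[1]),
    T.CenterCrop(model.input_size[1]),
    T.ToTensor(),
    T.Normalize(mean=model.mean, std=model.std),
])

with torch.no_grad():
    logits = model(torch.randn(1, *model.input_size))  # dummy batch
print(logits.shape)  # torch.Size([1, 1000])
```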
25,723 | Cadene/pretrained-models.pytorch | pretrainedmodels/models/dpn.py | adaptive_avgmax_pool2d | def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x | python | def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x | [
"def",
"adaptive_avgmax_pool2d",
"(",
"x",
",",
"pool_type",
"=",
"'avg'",
",",
"padding",
"=",
"0",
",",
"count_include_pad",
"=",
"False",
")",
":",
"if",
"pool_type",
"==",
"'avgmaxc'",
":",
"x",
"=",
"torch",
".",
"cat",
"(",
"[",
"F",
".",
"avg_pool2d",
"(",
"x",
",",
"kernel_size",
"=",
"(",
"x",
".",
"size",
"(",
"2",
")",
",",
"x",
".",
"size",
"(",
"3",
")",
")",
",",
"padding",
"=",
"padding",
",",
"count_include_pad",
"=",
"count_include_pad",
")",
",",
"F",
".",
"max_pool2d",
"(",
"x",
",",
"kernel_size",
"=",
"(",
"x",
".",
"size",
"(",
"2",
")",
",",
"x",
".",
"size",
"(",
"3",
")",
")",
",",
"padding",
"=",
"padding",
")",
"]",
",",
"dim",
"=",
"1",
")",
"elif",
"pool_type",
"==",
"'avgmax'",
":",
"x_avg",
"=",
"F",
".",
"avg_pool2d",
"(",
"x",
",",
"kernel_size",
"=",
"(",
"x",
".",
"size",
"(",
"2",
")",
",",
"x",
".",
"size",
"(",
"3",
")",
")",
",",
"padding",
"=",
"padding",
",",
"count_include_pad",
"=",
"count_include_pad",
")",
"x_max",
"=",
"F",
".",
"max_pool2d",
"(",
"x",
",",
"kernel_size",
"=",
"(",
"x",
".",
"size",
"(",
"2",
")",
",",
"x",
".",
"size",
"(",
"3",
")",
")",
",",
"padding",
"=",
"padding",
")",
"x",
"=",
"0.5",
"*",
"(",
"x_avg",
"+",
"x_max",
")",
"elif",
"pool_type",
"==",
"'max'",
":",
"x",
"=",
"F",
".",
"max_pool2d",
"(",
"x",
",",
"kernel_size",
"=",
"(",
"x",
".",
"size",
"(",
"2",
")",
",",
"x",
".",
"size",
"(",
"3",
")",
")",
",",
"padding",
"=",
"padding",
")",
"else",
":",
"if",
"pool_type",
"!=",
"'avg'",
":",
"print",
"(",
"'Invalid pool type %s specified. Defaulting to average pooling.'",
"%",
"pool_type",
")",
"x",
"=",
"F",
".",
"avg_pool2d",
"(",
"x",
",",
"kernel_size",
"=",
"(",
"x",
".",
"size",
"(",
"2",
")",
",",
"x",
".",
"size",
"(",
"3",
")",
")",
",",
"padding",
"=",
"padding",
",",
"count_include_pad",
"=",
"count_include_pad",
")",
"return",
"x"
] | Selectable global pooling function with dynamic input kernel size | [
"Selectable",
"global",
"pooling",
"function",
"with",
"dynamic",
"input",
"kernel",
"size"
] | 021d97897c9aa76ec759deff43d341c4fd45d7ba | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/dpn.py#L407-L428 |
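Note the shape consequences of the pooling modes above: 'avgmaxc' concatenates along the channel axis and therefore doubles C, while 'avgmax' averages the two pooled maps and keeps C unchanged. A small self-contained sketch, restating the record's calls so it runs without the dpn module:

```python
# Sketch: shape behavior of the pooling modes described in the record.
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 7, 7)            # (N, C, H, W) feature map
k = (x.size(2), x.size(3))             # kernel covers the whole spatial extent

avg = F.avg_pool2d(x, kernel_size=k)               # (2, 8, 1, 1)
mx = F.max_pool2d(x, kernel_size=k)                # (2, 8, 1, 1)
avgmax = 0.5 * (avg + mx)                          # (2, 8, 1, 1), C unchanged
avgmaxc = torch.cat([avg, mx], dim=1)              # (2, 16, 1, 1), C doubled

print(avg.shape, avgmax.shape, avgmaxc.shape)
```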
25,724 | Cadene/pretrained-models.pytorch | pretrainedmodels/datasets/utils.py | download_url | def download_url(url, destination=None, progress_bar=True):
"""Download a URL to a local file.
Parameters
----------
url : str
The URL to download.
destination : str, None
The destination of the file. If None is given the file is saved to a temporary directory.
progress_bar : bool
Whether to show a command-line progress bar while downloading.
Returns
-------
filename : str
The location of the downloaded file.
Notes
-----
Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm
"""
def my_hook(t):
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
if b > 0:
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
if progress_bar:
with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
else:
        filename, _ = urlretrieve(url, filename=destination)
    return filename | python | def download_url(url, destination=None, progress_bar=True):
"""Download a URL to a local file.
Parameters
----------
url : str
The URL to download.
destination : str, None
The destination of the file. If None is given the file is saved to a temporary directory.
progress_bar : bool
Whether to show a command-line progress bar while downloading.
Returns
-------
filename : str
The location of the downloaded file.
Notes
-----
Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm
"""
def my_hook(t):
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
if b > 0:
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
if progress_bar:
with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
else:
        filename, _ = urlretrieve(url, filename=destination)
    return filename | [
"def",
"download_url",
"(",
"url",
",",
"destination",
"=",
"None",
",",
"progress_bar",
"=",
"True",
")",
":",
"def",
"my_hook",
"(",
"t",
")",
":",
"last_b",
"=",
"[",
"0",
"]",
"def",
"inner",
"(",
"b",
"=",
"1",
",",
"bsize",
"=",
"1",
",",
"tsize",
"=",
"None",
")",
":",
"if",
"tsize",
"is",
"not",
"None",
":",
"t",
".",
"total",
"=",
"tsize",
"if",
"b",
">",
"0",
":",
"t",
".",
"update",
"(",
"(",
"b",
"-",
"last_b",
"[",
"0",
"]",
")",
"*",
"bsize",
")",
"last_b",
"[",
"0",
"]",
"=",
"b",
"return",
"inner",
"if",
"progress_bar",
":",
"with",
"tqdm",
"(",
"unit",
"=",
"'B'",
",",
"unit_scale",
"=",
"True",
",",
"miniters",
"=",
"1",
",",
"desc",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"as",
"t",
":",
"filename",
",",
"_",
"=",
"urlretrieve",
"(",
"url",
",",
"filename",
"=",
"destination",
",",
"reporthook",
"=",
"my_hook",
"(",
"t",
")",
")",
"else",
":",
"filename",
",",
"_",
"=",
"urlretrieve",
"(",
"url",
",",
"filename",
"=",
"destination",
")"
] | Download a URL to a local file.
Parameters
----------
url : str
The URL to download.
destination : str, None
The destination of the file. If None is given the file is saved to a temporary directory.
progress_bar : bool
Whether to show a command-line progress bar while downloading.
Returns
-------
filename : str
The location of the downloaded file.
Notes
-----
Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm | [
"Download",
"a",
"URL",
"to",
"a",
"local",
"file",
"."
] | 021d97897c9aa76ec759deff43d341c4fd45d7ba | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/datasets/utils.py#L45-L83 |
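The nested `my_hook` closure above adapts urllib's reporthook protocol — the hook receives `(block_number, block_size, total_size)` — to tqdm's `update`, keeping the previous block count in a one-element list so the inner function can mutate it. A hedged standalone sketch of the same adapter (the URL below is a placeholder, not from the record):

```python
# Sketch: the urlretrieve-reporthook-to-tqdm adapter used in the record.
from urllib.request import urlretrieve
from tqdm import tqdm

def tqdm_hook(t):
    last_b = [0]                        # mutable cell shared with the closure
    def inner(b=1, bsize=1, tsize=None):
        if tsize is not None:
            t.total = tsize             # total size becomes known from headers
        if b > 0:
            t.update((b - last_b[0]) * bsize)
        last_b[0] = b
    return inner

url = "https://example.com/file.bin"    # placeholder URL
with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
    filename, _ = urlretrieve(url, reporthook=tqdm_hook(t))
```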
25,725 | quantopian/zipline | zipline/utils/cache.py | CachedObject.unwrap | def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
"""
expires = self._expires
if expires is AlwaysExpired or expires < dt:
raise Expired(self._expires)
return self._value | python | def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
"""
expires = self._expires
if expires is AlwaysExpired or expires < dt:
raise Expired(self._expires)
return self._value | [
"def",
"unwrap",
"(",
"self",
",",
"dt",
")",
":",
"expires",
"=",
"self",
".",
"_expires",
"if",
"expires",
"is",
"AlwaysExpired",
"or",
"expires",
"<",
"dt",
":",
"raise",
"Expired",
"(",
"self",
".",
"_expires",
")",
"return",
"self",
".",
"_value"
] | Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires. | [
"Get",
"the",
"cached",
"value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cache.py#L67-L84 |
25,726 | quantopian/zipline | zipline/utils/cache.py | ExpiringCache.get | def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
"""
try:
return self._cache[key].unwrap(dt)
except Expired:
self.cleanup(self._cache[key]._unsafe_get_value())
del self._cache[key]
raise KeyError(key) | python | def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
"""
try:
return self._cache[key].unwrap(dt)
except Expired:
self.cleanup(self._cache[key]._unsafe_get_value())
del self._cache[key]
raise KeyError(key) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"dt",
")",
":",
"try",
":",
"return",
"self",
".",
"_cache",
"[",
"key",
"]",
".",
"unwrap",
"(",
"dt",
")",
"except",
"Expired",
":",
"self",
".",
"cleanup",
"(",
"self",
".",
"_cache",
"[",
"key",
"]",
".",
"_unsafe_get_value",
"(",
")",
")",
"del",
"self",
".",
"_cache",
"[",
"key",
"]",
"raise",
"KeyError",
"(",
"key",
")"
] | Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired. | [
"Get",
"the",
"value",
"of",
"a",
"cached",
"object",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cache.py#L131-L157 |
25,727 | quantopian/zipline | zipline/utils/cache.py | ExpiringCache.set | def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
"""
self._cache[key] = CachedObject(value, expiration_dt) | python | def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
"""
self._cache[key] = CachedObject(value, expiration_dt) | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
",",
"expiration_dt",
")",
":",
"self",
".",
"_cache",
"[",
"key",
"]",
"=",
"CachedObject",
"(",
"value",
",",
"expiration_dt",
")"
] | Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``. | [
"Adds",
"a",
"new",
"key",
"value",
"pair",
"to",
"the",
"cache",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cache.py#L159-L172 |
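Taken together, `set` and `get` behave like a dict whose entries stay valid through their `expiration_dt` and are evicted (with `cleanup`) on the first strictly later lookup. A hedged usage sketch — it assumes zipline is importable and that `ExpiringCache()` is constructible with defaults, as its use in `get`/`set` suggests:

```python
# Sketch: expected ExpiringCache semantics per the docstrings above.
import pandas as pd
from zipline.utils.cache import ExpiringCache

cache = ExpiringCache()
cache.set('window', [1, 2, 3], expiration_dt=pd.Timestamp('2024-01-02'))

# Valid at exactly the expiration instant (expiry is strictly-greater-than).
print(cache.get('window', pd.Timestamp('2024-01-02')))   # [1, 2, 3]
try:
    cache.get('window', pd.Timestamp('2024-01-03'))       # strictly later
except KeyError:
    print('expired and evicted')
```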
25,728 | quantopian/zipline | zipline/utils/cache.py | working_dir.ensure_dir | def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path | python | def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path | [
"def",
"ensure_dir",
"(",
"self",
",",
"*",
"path_parts",
")",
":",
"path",
"=",
"self",
".",
"getpath",
"(",
"*",
"path_parts",
")",
"ensure_directory",
"(",
"path",
")",
"return",
"path"
] | Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory. | [
"Ensures",
"a",
"subdirectory",
"of",
"the",
"working",
"directory",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cache.py#L358-L368 |
25,729 | quantopian/zipline | zipline/data/in_memory_daily_bars.py | verify_frames_aligned | def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
) | python | def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
) | [
"def",
"verify_frames_aligned",
"(",
"frames",
",",
"calendar",
")",
":",
"indexes",
"=",
"[",
"f",
".",
"index",
"for",
"f",
"in",
"frames",
"]",
"check_indexes_all_same",
"(",
"indexes",
",",
"message",
"=",
"\"DataFrame indexes don't match:\"",
")",
"columns",
"=",
"[",
"f",
".",
"columns",
"for",
"f",
"in",
"frames",
"]",
"check_indexes_all_same",
"(",
"columns",
",",
"message",
"=",
"\"DataFrame columns don't match:\"",
")",
"start",
",",
"end",
"=",
"indexes",
"[",
"0",
"]",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"cal_sessions",
"=",
"calendar",
".",
"sessions_in_range",
"(",
"start",
",",
"end",
")",
"check_indexes_all_same",
"(",
"[",
"indexes",
"[",
"0",
"]",
",",
"cal_sessions",
"]",
",",
"\"DataFrame index doesn't match {} calendar:\"",
".",
"format",
"(",
"calendar",
".",
"name",
")",
",",
")"
] | Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``. | [
"Verify",
"that",
"DataFrames",
"in",
"frames",
"have",
"the",
"same",
"indexing",
"scheme",
"and",
"are",
"aligned",
"to",
"calendar",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/in_memory_daily_bars.py#L124-L152 |
25,730 | quantopian/zipline | zipline/utils/functional.py | same | def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
"""
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest) | python | def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
"""
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest) | [
"def",
"same",
"(",
"*",
"values",
")",
":",
"if",
"not",
"values",
":",
"return",
"True",
"first",
",",
"rest",
"=",
"values",
"[",
"0",
"]",
",",
"values",
"[",
"1",
":",
"]",
"return",
"all",
"(",
"value",
"==",
"first",
"for",
"value",
"in",
"rest",
")"
] | Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True | [
"Check",
"if",
"all",
"values",
"in",
"a",
"sequence",
"are",
"equal",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L88-L106 |
25,731 | quantopian/zipline | zipline/utils/functional.py | getattrs | def getattrs(value, attrs, default=_no_default):
"""
Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Examples
--------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default'
"""
try:
for attr in attrs:
value = getattr(value, attr)
except AttributeError:
if default is _no_default:
raise
value = default
return value | python | def getattrs(value, attrs, default=_no_default):
"""
Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Examples
--------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default'
"""
try:
for attr in attrs:
value = getattr(value, attr)
except AttributeError:
if default is _no_default:
raise
value = default
return value | [
"def",
"getattrs",
"(",
"value",
",",
"attrs",
",",
"default",
"=",
"_no_default",
")",
":",
"try",
":",
"for",
"attr",
"in",
"attrs",
":",
"value",
"=",
"getattr",
"(",
"value",
",",
"attr",
")",
"except",
"AttributeError",
":",
"if",
"default",
"is",
"_no_default",
":",
"raise",
"value",
"=",
"default",
"return",
"value"
] | Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Examples
--------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default' | [
"Perform",
"a",
"chained",
"application",
"of",
"getattr",
"on",
"value",
"with",
"the",
"values",
"in",
"attrs",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L256-L303 |
25,732 | quantopian/zipline | zipline/utils/functional.py | set_attribute | def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator | python | def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator | [
"def",
"set_attribute",
"(",
"name",
",",
"value",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"setattr",
"(",
"f",
",",
"name",
",",
"value",
")",
"return",
"f",
"return",
"decorator"
] | Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo' | [
"Decorator",
"factory",
"for",
"setting",
"attributes",
"on",
"a",
"function",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L307-L327 |
25,733 | quantopian/zipline | zipline/utils/functional.py | foldr | def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right-associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
    For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(
flip(f),
reversed(seq),
*(default,) if default is not _no_default else ()
) | python | def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right-associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
    For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(
flip(f),
reversed(seq),
*(default,) if default is not _no_default else ()
) | [
"def",
"foldr",
"(",
"f",
",",
"seq",
",",
"default",
"=",
"_no_default",
")",
":",
"return",
"reduce",
"(",
"flip",
"(",
"f",
")",
",",
"reversed",
"(",
"seq",
")",
",",
"*",
"(",
"default",
",",
")",
"if",
"default",
"is",
"not",
"_no_default",
"else",
"(",
")",
")"
] | Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
This function works by reducing the list in a right-associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum` | [
"Fold",
"a",
"function",
"over",
"a",
"sequence",
"with",
"right",
"associativity",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L337-L393 |
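The implementation above reduces the record to one line: a right fold is a left fold (`functools.reduce`) over the reversed sequence with the argument order flipped. The distinction only matters for non-associative operations, which a short sketch makes concrete:

```python
# Sketch: right vs. left fold with a non-associative operation.
# Minimal equivalent of the record's reduce(flip(f), reversed(seq), default).
from functools import reduce

def foldr(f, seq, default):
    return reduce(lambda acc, x: f(x, acc), reversed(seq), default)

# Subtraction is not associative, so the two folds disagree:
print(reduce(lambda a, b: a - b, [1, 2, 3], 0))     # ((0 - 1) - 2) - 3 == -6
print(foldr(lambda x, acc: x - acc, [1, 2, 3], 0))  # 1 - (2 - (3 - 0)) == 2
```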
25,734 | quantopian/zipline | zipline/utils/functional.py | invert | def invert(d):
"""
Invert a dictionary into a dictionary of sets.
>>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP
{1: {'a', 'c'}, 2: {'b'}}
"""
out = {}
for k, v in iteritems(d):
try:
out[v].add(k)
except KeyError:
out[v] = {k}
return out | python | def invert(d):
"""
Invert a dictionary into a dictionary of sets.
>>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP
{1: {'a', 'c'}, 2: {'b'}}
"""
out = {}
for k, v in iteritems(d):
try:
out[v].add(k)
except KeyError:
out[v] = {k}
return out | [
"def",
"invert",
"(",
"d",
")",
":",
"out",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"d",
")",
":",
"try",
":",
"out",
"[",
"v",
"]",
".",
"add",
"(",
"k",
")",
"except",
"KeyError",
":",
"out",
"[",
"v",
"]",
"=",
"{",
"k",
"}",
"return",
"out"
] | Invert a dictionary into a dictionary of sets.
>>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP
{1: {'a', 'c'}, 2: {'b'}} | [
"Invert",
"a",
"dictionary",
"into",
"a",
"dictionary",
"of",
"sets",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L396-L409 |
25,735 | quantopian/zipline | zipline/examples/olmar.py | simplex_projection | def simplex_projection(v, b=1):
r"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
    s.t. \sum_{i=1}^{m} w_{i} = z, w_{i} \geq 0
Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com).
"""
v = np.asarray(v)
p = len(v)
# Sort v into u in descending order
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)
rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho + 1)])
w = (v - theta)
w[w < 0] = 0
return w | python | def simplex_projection(v, b=1):
r"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
    s.t. \sum_{i=1}^{m} w_{i} = z, w_{i} \geq 0
Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com).
"""
v = np.asarray(v)
p = len(v)
# Sort v into u in descending order
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)
rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho + 1)])
w = (v - theta)
w[w < 0] = 0
return w | [
"def",
"simplex_projection",
"(",
"v",
",",
"b",
"=",
"1",
")",
":",
"v",
"=",
"np",
".",
"asarray",
"(",
"v",
")",
"p",
"=",
"len",
"(",
"v",
")",
"# Sort v into u in descending order",
"v",
"=",
"(",
"v",
">",
"0",
")",
"*",
"v",
"u",
"=",
"np",
".",
"sort",
"(",
"v",
")",
"[",
":",
":",
"-",
"1",
"]",
"sv",
"=",
"np",
".",
"cumsum",
"(",
"u",
")",
"rho",
"=",
"np",
".",
"where",
"(",
"u",
">",
"(",
"sv",
"-",
"b",
")",
"/",
"np",
".",
"arange",
"(",
"1",
",",
"p",
"+",
"1",
")",
")",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"theta",
"=",
"np",
".",
"max",
"(",
"[",
"0",
",",
"(",
"sv",
"[",
"rho",
"]",
"-",
"b",
")",
"/",
"(",
"rho",
"+",
"1",
")",
"]",
")",
"w",
"=",
"(",
"v",
"-",
"theta",
")",
"w",
"[",
"w",
"<",
"0",
"]",
"=",
"0",
"return",
"w"
] | r"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. \sum_{i=1}^{m} w_{i} = z, w_{i} \geq 0
Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com). | [
"r",
"Projection",
"vectors",
"to",
"the",
"simplex",
"domain"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/examples/olmar.py#L111-L146 |
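The projection above finds the largest index rho with u_rho > (cumsum(u)_rho - b)/rho and then soft-thresholds by theta, which lands the result on {w : sum(w) = b, w >= 0}. A numeric check of those two invariants (the record's algorithm is restated verbatim so the sketch runs standalone):

```python
# Sketch: checking the simplex-projection invariants described above.
import numpy as np

def simplex_projection(v, b=1):
    v = np.asarray(v, dtype=float)
    p = len(v)
    v = (v > 0) * v                     # zero out negative entries first
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)
    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = v - theta
    w[w < 0] = 0
    return w

w = simplex_projection([0.4, 0.3, -0.4, 0.5])
assert np.isclose(w.sum(), 1.0)         # lands on the simplex
assert (w >= 0).all()                   # stays in the nonnegative orthant
print(w)  # ~[0.3333, 0.2333, 0.0, 0.4333], matching the doctest above
```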
25,736 | quantopian/zipline | zipline/examples/__init__.py | run_example | def run_example(example_name, environ):
"""
Run an example module from zipline.examples.
"""
mod = EXAMPLE_MODULES[example_name]
register_calendar("YAHOO", get_calendar("NYSE"), force=True)
return run_algorithm(
initialize=getattr(mod, 'initialize', None),
handle_data=getattr(mod, 'handle_data', None),
before_trading_start=getattr(mod, 'before_trading_start', None),
analyze=getattr(mod, 'analyze', None),
bundle='test',
environ=environ,
# Provide a default capital base, but allow the test to override.
**merge({'capital_base': 1e7}, mod._test_args())
) | python | def run_example(example_name, environ):
"""
Run an example module from zipline.examples.
"""
mod = EXAMPLE_MODULES[example_name]
register_calendar("YAHOO", get_calendar("NYSE"), force=True)
return run_algorithm(
initialize=getattr(mod, 'initialize', None),
handle_data=getattr(mod, 'handle_data', None),
before_trading_start=getattr(mod, 'before_trading_start', None),
analyze=getattr(mod, 'analyze', None),
bundle='test',
environ=environ,
# Provide a default capital base, but allow the test to override.
**merge({'capital_base': 1e7}, mod._test_args())
) | [
"def",
"run_example",
"(",
"example_name",
",",
"environ",
")",
":",
"mod",
"=",
"EXAMPLE_MODULES",
"[",
"example_name",
"]",
"register_calendar",
"(",
"\"YAHOO\"",
",",
"get_calendar",
"(",
"\"NYSE\"",
")",
",",
"force",
"=",
"True",
")",
"return",
"run_algorithm",
"(",
"initialize",
"=",
"getattr",
"(",
"mod",
",",
"'initialize'",
",",
"None",
")",
",",
"handle_data",
"=",
"getattr",
"(",
"mod",
",",
"'handle_data'",
",",
"None",
")",
",",
"before_trading_start",
"=",
"getattr",
"(",
"mod",
",",
"'before_trading_start'",
",",
"None",
")",
",",
"analyze",
"=",
"getattr",
"(",
"mod",
",",
"'analyze'",
",",
"None",
")",
",",
"bundle",
"=",
"'test'",
",",
"environ",
"=",
"environ",
",",
"# Provide a default capital base, but allow the test to override.",
"*",
"*",
"merge",
"(",
"{",
"'capital_base'",
":",
"1e7",
"}",
",",
"mod",
".",
"_test_args",
"(",
")",
")",
")"
] | Run an example module from zipline.examples. | [
"Run",
"an",
"example",
"module",
"from",
"zipline",
".",
"examples",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/examples/__init__.py#L64-L81 |
25,737 | quantopian/zipline | zipline/pipeline/factors/statistical.py | vectorized_beta | def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many non-nan observations in both ``dependents`` and
``independents`` will output NaN as the regression coefficient.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
"""
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
    # product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
    # Write nans back to locations where we have more than the allowed number of
# missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out | python | def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
        more than this many missing observations in both ``dependents`` and
``independents`` will output NaN as the regression coefficient.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
"""
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
    # product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
    # Write nans back to locations where we have more than the allowed number of
# missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out | [
"def",
"vectorized_beta",
"(",
"dependents",
",",
"independent",
",",
"allowed_missing",
",",
"out",
"=",
"None",
")",
":",
"# Cache these as locals since we're going to call them multiple times.",
"nan",
"=",
"np",
".",
"nan",
"isnan",
"=",
"np",
".",
"isnan",
"N",
",",
"M",
"=",
"dependents",
".",
"shape",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"np",
".",
"full",
"(",
"M",
",",
"nan",
")",
"# Copy N times as a column vector and fill with nans to have the same",
"# missing value pattern as the dependent variable.",
"#",
"# PERF_TODO: We could probably avoid the space blowup by doing this in",
"# Cython.",
"# shape: (N, M)",
"independent",
"=",
"np",
".",
"where",
"(",
"isnan",
"(",
"dependents",
")",
",",
"nan",
",",
"independent",
",",
")",
"# Calculate beta as Cov(X, Y) / Cov(X, X).",
"# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa",
"#",
"# NOTE: The usual formula for covariance is::",
"#",
"# mean((X - mean(X)) * (Y - mean(Y)))",
"#",
"# However, we don't actually need to take the mean of both sides of the",
"# product, because of the folllowing equivalence::",
"#",
"# Let X_res = (X - mean(X)).",
"# We have:",
"#",
"# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))",
"# (1) = mean((X_res * Y) - (X_res * mean(Y)))",
"# (2) = mean(X_res * Y) - mean(X_res * mean(Y))",
"# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)",
"# (4) = mean(X_res * Y) - 0 * mean(Y)",
"# (5) = mean(X_res * Y)",
"#",
"#",
"# The tricky step in the above derivation is step (4). We know that",
"# mean(X_res) is zero because, for any X:",
"#",
"# mean(X - mean(X)) = mean(X) - mean(X) = 0.",
"#",
"# The upshot of this is that we only have to center one of `independent`",
"# and `dependent` when calculating covariances. Since we need the centered",
"# `independent` to calculate its variance in the next step, we choose to",
"# center `independent`.",
"# shape: (N, M)",
"ind_residual",
"=",
"independent",
"-",
"nanmean",
"(",
"independent",
",",
"axis",
"=",
"0",
")",
"# shape: (M,)",
"covariances",
"=",
"nanmean",
"(",
"ind_residual",
"*",
"dependents",
",",
"axis",
"=",
"0",
")",
"# We end up with different variances in each column here because each",
"# column may have a different subset of the data dropped due to missing",
"# data in the corresponding dependent column.",
"# shape: (M,)",
"independent_variances",
"=",
"nanmean",
"(",
"ind_residual",
"**",
"2",
",",
"axis",
"=",
"0",
")",
"# shape: (M,)",
"np",
".",
"divide",
"(",
"covariances",
",",
"independent_variances",
",",
"out",
"=",
"out",
")",
"# Write nans back to locations where we have more then allowed number of",
"# missing entries.",
"nanlocs",
"=",
"isnan",
"(",
"independent",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
">",
"allowed_missing",
"out",
"[",
"nanlocs",
"]",
"=",
"nan",
"return",
"out"
] | Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in both ``dependents`` and
``independents`` will output NaN as the regression coefficient.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``. | [
"Compute",
"slopes",
"of",
"linear",
"regressions",
"between",
"columns",
"of",
"dependents",
"and",
"independent",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/statistical.py#L572-L665 |
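The long comment block above argues that only the independent series needs centering, since mean(X_res) = 0 implies Cov(X, Y) = mean(X_res * Y). A hedged cross-check of that identity against a per-column `np.polyfit`, on data with no missing values:

```python
# Sketch: the "center only X" covariance trick vs. ordinary OLS slopes.
import numpy as np

rng = np.random.default_rng(0)
N, M = 250, 4
x = rng.normal(size=(N, 1))                          # independent, (N, 1)
true_betas = np.array([0.5, -1.0, 2.0, 0.0])
y = x * true_betas + 0.1 * rng.normal(size=(N, M))   # dependents, (N, M)

x_res = x - x.mean(axis=0)                           # center only X
betas = (x_res * y).mean(axis=0) / (x_res ** 2).mean(axis=0)

reference = np.array([np.polyfit(x[:, 0], y[:, i], 1)[0] for i in range(M)])
assert np.allclose(betas, reference)
print(betas)   # close to [0.5, -1.0, 2.0, 0.0]
```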
25,738 | quantopian/zipline | zipline/data/treasuries_can.py | _format_url | def _format_url(instrument_type,
instrument_ids,
start_date,
end_date,
earliest_allowed_date):
"""
Format a URL for loading data from Bank of Canada.
"""
return (
"http://www.bankofcanada.ca/stats/results/csv"
"?lP=lookup_{instrument_type}_yields.php"
"&sR={restrict}"
"&se={instrument_ids}"
"&dF={start}"
"&dT={end}".format(
instrument_type=instrument_type,
instrument_ids='-'.join(map(prepend("L_"), instrument_ids)),
restrict=earliest_allowed_date.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"),
)
) | python | def _format_url(instrument_type,
instrument_ids,
start_date,
end_date,
earliest_allowed_date):
"""
Format a URL for loading data from Bank of Canada.
"""
return (
"http://www.bankofcanada.ca/stats/results/csv"
"?lP=lookup_{instrument_type}_yields.php"
"&sR={restrict}"
"&se={instrument_ids}"
"&dF={start}"
"&dT={end}".format(
instrument_type=instrument_type,
instrument_ids='-'.join(map(prepend("L_"), instrument_ids)),
restrict=earliest_allowed_date.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"),
)
) | [
"def",
"_format_url",
"(",
"instrument_type",
",",
"instrument_ids",
",",
"start_date",
",",
"end_date",
",",
"earliest_allowed_date",
")",
":",
"return",
"(",
"\"http://www.bankofcanada.ca/stats/results/csv\"",
"\"?lP=lookup_{instrument_type}_yields.php\"",
"\"&sR={restrict}\"",
"\"&se={instrument_ids}\"",
"\"&dF={start}\"",
"\"&dT={end}\"",
".",
"format",
"(",
"instrument_type",
"=",
"instrument_type",
",",
"instrument_ids",
"=",
"'-'",
".",
"join",
"(",
"map",
"(",
"prepend",
"(",
"\"L_\"",
")",
",",
"instrument_ids",
")",
")",
",",
"restrict",
"=",
"earliest_allowed_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
",",
"start",
"=",
"start_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
",",
"end",
"=",
"end_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
",",
")",
")"
] | Format a URL for loading data from Bank of Canada. | [
"Format",
"a",
"URL",
"for",
"loading",
"data",
"from",
"Bank",
"of",
"Canada",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries_can.py#L39-L60 |
25,739 | quantopian/zipline | zipline/data/treasuries_can.py | load_frame | def load_frame(url, skiprows):
"""
Load a DataFrame of data from a Bank of Canada site.
"""
return pd.read_csv(
url,
skiprows=skiprows,
skipinitialspace=True,
na_values=["Bank holiday", "Not available"],
parse_dates=["Date"],
index_col="Date",
).dropna(how='all') \
.tz_localize('UTC') \
.rename(columns=COLUMN_NAMES) | python | def load_frame(url, skiprows):
"""
Load a DataFrame of data from a Bank of Canada site.
"""
return pd.read_csv(
url,
skiprows=skiprows,
skipinitialspace=True,
na_values=["Bank holiday", "Not available"],
parse_dates=["Date"],
index_col="Date",
).dropna(how='all') \
.tz_localize('UTC') \
.rename(columns=COLUMN_NAMES) | [
"def",
"load_frame",
"(",
"url",
",",
"skiprows",
")",
":",
"return",
"pd",
".",
"read_csv",
"(",
"url",
",",
"skiprows",
"=",
"skiprows",
",",
"skipinitialspace",
"=",
"True",
",",
"na_values",
"=",
"[",
"\"Bank holiday\"",
",",
"\"Not available\"",
"]",
",",
"parse_dates",
"=",
"[",
"\"Date\"",
"]",
",",
"index_col",
"=",
"\"Date\"",
",",
")",
".",
"dropna",
"(",
"how",
"=",
"'all'",
")",
".",
"tz_localize",
"(",
"'UTC'",
")",
".",
"rename",
"(",
"columns",
"=",
"COLUMN_NAMES",
")"
] | Load a DataFrame of data from a Bank of Canada site. | [
"Load",
"a",
"DataFrame",
"of",
"data",
"from",
"a",
"Bank",
"of",
"Canada",
"site",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries_can.py#L67-L80 |
25,740 | quantopian/zipline | zipline/data/treasuries_can.py | check_known_inconsistencies | def check_known_inconsistencies(bill_data, bond_data):
"""
There are a couple quirks in the data provided by Bank of Canada.
Check that no new quirks have been introduced in the latest download.
"""
inconsistent_dates = bill_data.index.sym_diff(bond_data.index)
known_inconsistencies = [
# bill_data has an entry for 2010-02-15, which bond_data doesn't.
# bond_data has an entry for 2006-09-04, which bill_data doesn't.
# Both of these dates are bank holidays (Flag Day and Labor Day,
# respectively).
pd.Timestamp('2006-09-04', tz='UTC'),
pd.Timestamp('2010-02-15', tz='UTC'),
# 2013-07-25 comes back as "Not available" from the bills endpoint.
# This date doesn't seem to be a bank holiday, but the previous
# calendar implementation dropped this entry, so we drop it as well.
# If someone cares deeply about the integrity of the Canadian trading
# calendar, they may want to consider forward-filling here rather than
# dropping the row.
pd.Timestamp('2013-07-25', tz='UTC'),
]
unexpected_inconsistences = inconsistent_dates.drop(known_inconsistencies)
if len(unexpected_inconsistences):
in_bills = bill_data.index.difference(bond_data.index).difference(
known_inconsistencies
)
in_bonds = bond_data.index.difference(bill_data.index).difference(
known_inconsistencies
)
raise ValueError(
"Inconsistent dates for Canadian treasury bills vs bonds. \n"
"Dates with bills but not bonds: {in_bills}.\n"
"Dates with bonds but not bills: {in_bonds}.".format(
in_bills=in_bills,
in_bonds=in_bonds,
)
) | python | def check_known_inconsistencies(bill_data, bond_data):
"""
There are a couple quirks in the data provided by Bank of Canada.
Check that no new quirks have been introduced in the latest download.
"""
inconsistent_dates = bill_data.index.sym_diff(bond_data.index)
known_inconsistencies = [
# bill_data has an entry for 2010-02-15, which bond_data doesn't.
# bond_data has an entry for 2006-09-04, which bill_data doesn't.
# Both of these dates are bank holidays (Flag Day and Labor Day,
# respectively).
pd.Timestamp('2006-09-04', tz='UTC'),
pd.Timestamp('2010-02-15', tz='UTC'),
# 2013-07-25 comes back as "Not available" from the bills endpoint.
# This date doesn't seem to be a bank holiday, but the previous
# calendar implementation dropped this entry, so we drop it as well.
# If someone cares deeply about the integrity of the Canadian trading
# calendar, they may want to consider forward-filling here rather than
# dropping the row.
pd.Timestamp('2013-07-25', tz='UTC'),
]
unexpected_inconsistences = inconsistent_dates.drop(known_inconsistencies)
if len(unexpected_inconsistences):
in_bills = bill_data.index.difference(bond_data.index).difference(
known_inconsistencies
)
in_bonds = bond_data.index.difference(bill_data.index).difference(
known_inconsistencies
)
raise ValueError(
"Inconsistent dates for Canadian treasury bills vs bonds. \n"
"Dates with bills but not bonds: {in_bills}.\n"
"Dates with bonds but not bills: {in_bonds}.".format(
in_bills=in_bills,
in_bonds=in_bonds,
)
) | [
"def",
"check_known_inconsistencies",
"(",
"bill_data",
",",
"bond_data",
")",
":",
"inconsistent_dates",
"=",
"bill_data",
".",
"index",
".",
"sym_diff",
"(",
"bond_data",
".",
"index",
")",
"known_inconsistencies",
"=",
"[",
"# bill_data has an entry for 2010-02-15, which bond_data doesn't.",
"# bond_data has an entry for 2006-09-04, which bill_data doesn't.",
"# Both of these dates are bank holidays (Flag Day and Labor Day,",
"# respectively).",
"pd",
".",
"Timestamp",
"(",
"'2006-09-04'",
",",
"tz",
"=",
"'UTC'",
")",
",",
"pd",
".",
"Timestamp",
"(",
"'2010-02-15'",
",",
"tz",
"=",
"'UTC'",
")",
",",
"# 2013-07-25 comes back as \"Not available\" from the bills endpoint.",
"# This date doesn't seem to be a bank holiday, but the previous",
"# calendar implementation dropped this entry, so we drop it as well.",
"# If someone cares deeply about the integrity of the Canadian trading",
"# calendar, they may want to consider forward-filling here rather than",
"# dropping the row.",
"pd",
".",
"Timestamp",
"(",
"'2013-07-25'",
",",
"tz",
"=",
"'UTC'",
")",
",",
"]",
"unexpected_inconsistences",
"=",
"inconsistent_dates",
".",
"drop",
"(",
"known_inconsistencies",
")",
"if",
"len",
"(",
"unexpected_inconsistences",
")",
":",
"in_bills",
"=",
"bill_data",
".",
"index",
".",
"difference",
"(",
"bond_data",
".",
"index",
")",
".",
"difference",
"(",
"known_inconsistencies",
")",
"in_bonds",
"=",
"bond_data",
".",
"index",
".",
"difference",
"(",
"bill_data",
".",
"index",
")",
".",
"difference",
"(",
"known_inconsistencies",
")",
"raise",
"ValueError",
"(",
"\"Inconsistent dates for Canadian treasury bills vs bonds. \\n\"",
"\"Dates with bills but not bonds: {in_bills}.\\n\"",
"\"Dates with bonds but not bills: {in_bonds}.\"",
".",
"format",
"(",
"in_bills",
"=",
"in_bills",
",",
"in_bonds",
"=",
"in_bonds",
",",
")",
")"
] | There are a couple quirks in the data provided by Bank of Canada.
Check that no new quirks have been introduced in the latest download. | [
"There",
"are",
"a",
"couple",
"quirks",
"in",
"the",
"data",
"provided",
"by",
"Bank",
"of",
"Canada",
".",
"Check",
"that",
"no",
"new",
"quirks",
"have",
"been",
"introduced",
"in",
"the",
"latest",
"download",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries_can.py#L83-L119 |
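The validation above hinges on the symmetric difference of the two date indexes; `sym_diff` is the old pandas spelling, renamed `symmetric_difference` in later releases. A hedged sketch of the same validation shape with toy data:

```python
# Sketch: index symmetric-difference validation, as in the record above.
# Uses the modern pandas spelling; .sym_diff is the older alias.
import pandas as pd

bills = pd.DatetimeIndex(['2024-01-02', '2024-01-03', '2024-01-04'])
bonds = pd.DatetimeIndex(['2024-01-03', '2024-01-04', '2024-01-05'])
known = [pd.Timestamp('2024-01-02'), pd.Timestamp('2024-01-05')]

inconsistent = bills.symmetric_difference(bonds)
unexpected = inconsistent.drop(known)
if len(unexpected):
    raise ValueError("Unexpected date mismatches: {}".format(list(unexpected)))
print("only known inconsistencies:", list(inconsistent))
```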
25,741 | quantopian/zipline | zipline/data/treasuries_can.py | earliest_possible_date | def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
today = pd.Timestamp('now', tz='UTC').normalize()
# Bank of Canada only has the last 10 years of data at any given time.
return today.replace(year=today.year - 10) | python | def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
today = pd.Timestamp('now', tz='UTC').normalize()
# Bank of Canada only has the last 10 years of data at any given time.
return today.replace(year=today.year - 10) | [
"def",
"earliest_possible_date",
"(",
")",
":",
"today",
"=",
"pd",
".",
"Timestamp",
"(",
"'now'",
",",
"tz",
"=",
"'UTC'",
")",
".",
"normalize",
"(",
")",
"# Bank of Canada only has the last 10 years of data at any given time.",
"return",
"today",
".",
"replace",
"(",
"year",
"=",
"today",
".",
"year",
"-",
"10",
")"
] | The earliest date for which we can load data from this module. | [
"The",
"earliest",
"date",
"for",
"which",
"we",
"can",
"load",
"data",
"from",
"this",
"module",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries_can.py#L122-L128 |
25,742 | quantopian/zipline | zipline/finance/slippage.py | fill_price_worse_than_limit_price | def fill_price_worse_than_limit_price(fill_price, order):
"""
Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell).
"""
if order.limit:
# this is tricky! if an order with a limit price has reached
# the limit price, we will try to fill the order. do not fill
# these shares if the impacted price is worse than the limit
# price. return early to avoid creating the transaction.
# buy order is worse if the impacted price is greater than
# the limit price. sell order is worse if the impacted price
# is less than the limit price
if (order.direction > 0 and fill_price > order.limit) or \
(order.direction < 0 and fill_price < order.limit):
return True
return False | python | def fill_price_worse_than_limit_price(fill_price, order):
"""
Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell).
"""
if order.limit:
# this is tricky! if an order with a limit price has reached
# the limit price, we will try to fill the order. do not fill
# these shares if the impacted price is worse than the limit
# price. return early to avoid creating the transaction.
# buy order is worse if the impacted price is greater than
# the limit price. sell order is worse if the impacted price
# is less than the limit price
if (order.direction > 0 and fill_price > order.limit) or \
(order.direction < 0 and fill_price < order.limit):
return True
return False | [
"def",
"fill_price_worse_than_limit_price",
"(",
"fill_price",
",",
"order",
")",
":",
"if",
"order",
".",
"limit",
":",
"# this is tricky! if an order with a limit price has reached",
"# the limit price, we will try to fill the order. do not fill",
"# these shares if the impacted price is worse than the limit",
"# price. return early to avoid creating the transaction.",
"# buy order is worse if the impacted price is greater than",
"# the limit price. sell order is worse if the impacted price",
"# is less than the limit price",
"if",
"(",
"order",
".",
"direction",
">",
"0",
"and",
"fill_price",
">",
"order",
".",
"limit",
")",
"or",
"(",
"order",
".",
"direction",
"<",
"0",
"and",
"fill_price",
"<",
"order",
".",
"limit",
")",
":",
"return",
"True",
"return",
"False"
] | Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell). | [
"Checks",
"whether",
"the",
"fill",
"price",
"is",
"worse",
"than",
"the",
"order",
"s",
"limit",
"price",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/slippage.py#L50-L80 |
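The function in this record is self-contained enough to run verbatim. The usage check below pairs it with a toy `FakeOrder` namedtuple standing in for `zipline.finance.order.Order`; only the `limit` and `direction` fields, which are all the check reads, are modeled.

```python
from collections import namedtuple

# Toy stand-in for zipline.finance.order.Order: only the two fields the
# check reads. `direction` is +1 for a buy, -1 for a sell.
FakeOrder = namedtuple('FakeOrder', ['limit', 'direction'])

def fill_price_worse_than_limit_price(fill_price, order):
    if order.limit:
        if (order.direction > 0 and fill_price > order.limit) or \
                (order.direction < 0 and fill_price < order.limit):
            return True
    return False

buy = FakeOrder(limit=10.0, direction=1)
print(fill_price_worse_than_limit_price(10.05, buy))   # True: paid above limit
print(fill_price_worse_than_limit_price(9.95, buy))    # False
sell = FakeOrder(limit=10.0, direction=-1)
print(fill_price_worse_than_limit_price(9.95, sell))   # True: sold below limit
```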
25,743 | quantopian/zipline | zipline/finance/slippage.py | MarketImpactBase._get_window_data | def _get_window_data(self, data, asset, window_length):
"""
Internal utility method to return the trailing mean volume over the
past 'window_length' days, and volatility of close prices for a
specific asset.
Parameters
----------
data : The BarData from which to fetch the daily windows.
asset : The Asset whose data we are fetching.
window_length : Number of days of history used to calculate the mean
volume and close price volatility.
Returns
-------
(mean volume, volatility)
"""
try:
values = self._window_data_cache.get(asset, data.current_session)
except KeyError:
try:
# Add a day because we want 'window_length' complete days,
# excluding the current day.
volume_history = data.history(
asset, 'volume', window_length + 1, '1d',
)
close_history = data.history(
asset, 'close', window_length + 1, '1d',
)
except HistoryWindowStartsBeforeData:
# If there is not enough data to do a full history call, return
# values as if there was no data.
return 0, np.NaN
# Exclude the first value of the percent change array because it is
# always just NaN.
close_volatility = close_history[:-1].pct_change()[1:].std(
skipna=False,
)
values = {
'volume': volume_history[:-1].mean(),
'close': close_volatility * SQRT_252,
}
self._window_data_cache.set(asset, values, data.current_session)
return values['volume'], values['close'] | python | def _get_window_data(self, data, asset, window_length):
"""
Internal utility method to return the trailing mean volume over the
past 'window_length' days, and volatility of close prices for a
specific asset.
Parameters
----------
data : The BarData from which to fetch the daily windows.
asset : The Asset whose data we are fetching.
window_length : Number of days of history used to calculate the mean
volume and close price volatility.
Returns
-------
(mean volume, volatility)
"""
try:
values = self._window_data_cache.get(asset, data.current_session)
except KeyError:
try:
# Add a day because we want 'window_length' complete days,
# excluding the current day.
volume_history = data.history(
asset, 'volume', window_length + 1, '1d',
)
close_history = data.history(
asset, 'close', window_length + 1, '1d',
)
except HistoryWindowStartsBeforeData:
# If there is not enough data to do a full history call, return
# values as if there was no data.
return 0, np.NaN
# Exclude the first value of the percent change array because it is
# always just NaN.
close_volatility = close_history[:-1].pct_change()[1:].std(
skipna=False,
)
values = {
'volume': volume_history[:-1].mean(),
'close': close_volatility * SQRT_252,
}
self._window_data_cache.set(asset, values, data.current_session)
return values['volume'], values['close'] | [
"def",
"_get_window_data",
"(",
"self",
",",
"data",
",",
"asset",
",",
"window_length",
")",
":",
"try",
":",
"values",
"=",
"self",
".",
"_window_data_cache",
".",
"get",
"(",
"asset",
",",
"data",
".",
"current_session",
")",
"except",
"KeyError",
":",
"try",
":",
"# Add a day because we want 'window_length' complete days,",
"# excluding the current day.",
"volume_history",
"=",
"data",
".",
"history",
"(",
"asset",
",",
"'volume'",
",",
"window_length",
"+",
"1",
",",
"'1d'",
",",
")",
"close_history",
"=",
"data",
".",
"history",
"(",
"asset",
",",
"'close'",
",",
"window_length",
"+",
"1",
",",
"'1d'",
",",
")",
"except",
"HistoryWindowStartsBeforeData",
":",
"# If there is not enough data to do a full history call, return",
"# values as if there was no data.",
"return",
"0",
",",
"np",
".",
"NaN",
"# Exclude the first value of the percent change array because it is",
"# always just NaN.",
"close_volatility",
"=",
"close_history",
"[",
":",
"-",
"1",
"]",
".",
"pct_change",
"(",
")",
"[",
"1",
":",
"]",
".",
"std",
"(",
"skipna",
"=",
"False",
",",
")",
"values",
"=",
"{",
"'volume'",
":",
"volume_history",
"[",
":",
"-",
"1",
"]",
".",
"mean",
"(",
")",
",",
"'close'",
":",
"close_volatility",
"*",
"SQRT_252",
",",
"}",
"self",
".",
"_window_data_cache",
".",
"set",
"(",
"asset",
",",
"values",
",",
"data",
".",
"current_session",
")",
"return",
"values",
"[",
"'volume'",
"]",
",",
"values",
"[",
"'close'",
"]"
] | Internal utility method to return the trailing mean volume over the
past 'window_length' days, and volatility of close prices for a
specific asset.
Parameters
----------
data : The BarData from which to fetch the daily windows.
asset : The Asset whose data we are fetching.
window_length : Number of days of history used to calculate the mean
volume and close price volatility.
Returns
-------
(mean volume, volatility) | [
"Internal",
"utility",
"method",
"to",
"return",
"the",
"trailing",
"mean",
"volume",
"over",
"the",
"past",
"window_length",
"days",
"and",
"volatility",
"of",
"close",
"prices",
"for",
"a",
"specific",
"asset",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/slippage.py#L399-L444 |
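A standalone pandas sketch of the windowed statistics computed above: trailing mean volume and annualized close volatility over `window_length` complete days, excluding the current bar. The caching layer and `BarData.history` calls are dropped; the input series and the `SQRT_252` annualization constant are assumptions that mirror the record.

```python
import numpy as np
import pandas as pd

SQRT_252 = np.sqrt(252)  # annualize daily volatility (252 trading days)

def trailing_stats(close, volume, window_length):
    """Mean volume and annualized close volatility over the trailing
    `window_length` complete days, excluding the current (last) bar."""
    if len(close) < window_length + 1:
        # Mirror the record's "not enough history" fallback.
        return 0, np.nan
    closes = close.iloc[-(window_length + 1):-1]   # drop the current bar
    volumes = volume.iloc[-(window_length + 1):-1]
    # pct_change starts with a NaN, so drop the first element before std.
    daily_vol = closes.pct_change()[1:].std(skipna=False)
    return volumes.mean(), daily_vol * SQRT_252

idx = pd.date_range('2016-01-04', periods=40, freq='B')
close = pd.Series(np.linspace(100.0, 110.0, 40), index=idx)
volume = pd.Series(1e6, index=idx)
print(trailing_stats(close, volume, 20))  # (1000000.0, small positive float)
```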
25,744 | quantopian/zipline | zipline/pipeline/term.py | _assert_valid_categorical_missing_value | def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
) | python | def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
) | [
"def",
"_assert_valid_categorical_missing_value",
"(",
"value",
")",
":",
"label_types",
"=",
"LabelArray",
".",
"SUPPORTED_SCALAR_TYPES",
"if",
"not",
"isinstance",
"(",
"value",
",",
"label_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Categorical terms must have missing values of type \"",
"\"{types}.\"",
".",
"format",
"(",
"types",
"=",
"' or '",
".",
"join",
"(",
"[",
"t",
".",
"__name__",
"for",
"t",
"in",
"label_types",
"]",
")",
",",
")",
")"
] | Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term. | [
"Check",
"that",
"value",
"is",
"a",
"valid",
"categorical",
"missing_value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/term.py#L861-L875 |
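The same tuple-of-types validation can be shown without zipline. The concrete tuple below is an assumption for illustration; the real `LabelArray.SUPPORTED_SCALAR_TYPES` may differ.

```python
# Assumed stand-in for LabelArray.SUPPORTED_SCALAR_TYPES.
LABEL_TYPES = (str, bytes, type(None))

def assert_valid_categorical_missing_value(value):
    if not isinstance(value, LABEL_TYPES):
        raise TypeError(
            "Categorical terms must have missing values of type "
            "{types}.".format(
                types=' or '.join(t.__name__ for t in LABEL_TYPES),
            )
        )

assert_valid_categorical_missing_value(None)    # ok
assert_valid_categorical_missing_value('n/a')   # ok
# assert_valid_categorical_missing_value(0)     # would raise TypeError
```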
25,745 | quantopian/zipline | zipline/pipeline/term.py | Term._static_identity | def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params) | python | def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params) | [
"def",
"_static_identity",
"(",
"cls",
",",
"domain",
",",
"dtype",
",",
"missing_value",
",",
"window_safe",
",",
"ndim",
",",
"params",
")",
":",
"return",
"(",
"cls",
",",
"domain",
",",
"dtype",
",",
"missing_value",
",",
"window_safe",
",",
"ndim",
",",
"params",
")"
] | Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance. | [
"Return",
"the",
"identity",
"of",
"the",
"Term",
"that",
"would",
"be",
"constructed",
"from",
"the",
"given",
"arguments",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/term.py#L217-L235 |
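The docstring says equal identities let `Term.__new__` return a cached instance. A minimal sketch of that interning pattern follows, assuming (as the docstring implies but the record does not show) a class-level cache keyed on the identity tuple.

```python
class Interned(object):
    """Sketch of identity-based instance caching; the cache layout is an
    assumption, only the identity-tuple key comes from the record."""
    _cache = {}

    def __new__(cls, dtype, window_length):
        identity = (cls, dtype, window_length)   # must be fully hashable
        try:
            return cls._cache[identity]
        except KeyError:
            obj = super(Interned, cls).__new__(cls)
            obj.dtype = dtype
            obj.window_length = window_length
            cls._cache[identity] = obj
            return obj

a = Interned('float64', 10)
b = Interned('float64', 10)
print(a is b)                         # True: equal identities share an instance
print(a is Interned('float64', 20))   # False: different identity
```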
25,746 | quantopian/zipline | zipline/pipeline/term.py | ComputableTerm.dependencies | def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out | python | def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out | [
"def",
"dependencies",
"(",
"self",
")",
":",
"extra_input_rows",
"=",
"max",
"(",
"0",
",",
"self",
".",
"window_length",
"-",
"1",
")",
"out",
"=",
"{",
"}",
"for",
"term",
"in",
"self",
".",
"inputs",
":",
"out",
"[",
"term",
"]",
"=",
"extra_input_rows",
"out",
"[",
"self",
".",
"mask",
"]",
"=",
"0",
"return",
"out"
] | The number of extra rows needed for each of our inputs to compute this
term. | [
"The",
"number",
"of",
"extra",
"rows",
"needed",
"for",
"each",
"of",
"our",
"inputs",
"to",
"compute",
"this",
"term",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/term.py#L613-L623 |
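The `max(0, window_length - 1)` mapping above is worth a quick numeric check: a term with an N-bar window needs N bars of input to produce its first output, i.e. N - 1 extra rows of history, clamped at zero.

```python
def extra_rows_needed(window_length):
    # An N-bar window needs N - 1 rows before its first output date,
    # and never a negative count.
    return max(0, window_length - 1)

print(extra_rows_needed(1))    # 0 -- a 1-bar window needs no extra history
print(extra_rows_needed(30))   # 29
print(extra_rows_needed(0))    # 0 -- clamped for windowless terms
```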
25,747 | quantopian/zipline | zipline/pipeline/term.py | ComputableTerm.to_workspace_value | def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array-like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values | python | def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array-like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values | [
"def",
"to_workspace_value",
"(",
"self",
",",
"result",
",",
"assets",
")",
":",
"return",
"result",
".",
"unstack",
"(",
")",
".",
"fillna",
"(",
"self",
".",
"missing_value",
")",
".",
"reindex",
"(",
"columns",
"=",
"assets",
",",
"fill_value",
"=",
"self",
".",
"missing_value",
",",
")",
".",
"values"
] | Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array-like value that the engine can consume. | [
"Called",
"with",
"a",
"column",
"of",
"the",
"result",
"of",
"a",
"pipeline",
".",
"This",
"needs",
"to",
"put",
"the",
"data",
"into",
"a",
"format",
"that",
"can",
"be",
"used",
"in",
"a",
"workspace",
"to",
"continue",
"doing",
"computations",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/term.py#L638-L661 |
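The `unstack().fillna().reindex()` chain above reshapes a (date, asset) MultiIndexed series into a dense 2-D array with one column per requested asset. A runnable pandas demonstration with made-up data:

```python
import pandas as pd

dates = pd.to_datetime(['2016-01-04', '2016-01-05'])
# MultiIndexed (date, asset) series, as a pipeline result column would be;
# asset 3 has no row on the second date, and asset 2 none at all.
idx = pd.MultiIndex.from_tuples(
    [(dates[0], 1), (dates[0], 3), (dates[1], 1)],
    names=['date', 'asset'],
)
result = pd.Series([1.0, 2.0, 3.0], index=idx)

missing_value = 0.0
all_assets = pd.Index([1, 2, 3], name='asset')

dense = result.unstack().fillna(missing_value).reindex(
    columns=all_assets,
    fill_value=missing_value,
).values
print(dense)
# [[1. 0. 2.]
#  [3. 0. 0.]]
```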
25,748 | quantopian/zipline | zipline/finance/position.py | Position.earn_stock_dividend | def earn_stock_dividend(self, stock_dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'payment_asset': stock_dividend.payment_asset,
'share_count': np.floor(
self.amount * float(stock_dividend.ratio)
)
} | python | def earn_stock_dividend(self, stock_dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'payment_asset': stock_dividend.payment_asset,
'share_count': np.floor(
self.amount * float(stock_dividend.ratio)
)
} | [
"def",
"earn_stock_dividend",
"(",
"self",
",",
"stock_dividend",
")",
":",
"return",
"{",
"'payment_asset'",
":",
"stock_dividend",
".",
"payment_asset",
",",
"'share_count'",
":",
"np",
".",
"floor",
"(",
"self",
".",
"amount",
"*",
"float",
"(",
"stock_dividend",
".",
"ratio",
")",
")",
"}"
] | Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date. | [
"Register",
"the",
"number",
"of",
"shares",
"we",
"held",
"at",
"this",
"dividend",
"s",
"ex",
"date",
"so",
"that",
"we",
"can",
"pay",
"out",
"the",
"correct",
"amount",
"on",
"the",
"dividend",
"s",
"pay",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/position.py#L79-L89 |
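A worked example of the payout calculation above, with a toy namedtuple in place of zipline's stock-dividend object: 103 shares at a 0.1 ratio earn exactly 10 new shares, the fractional 0.3 floored away.

```python
import numpy as np
from collections import namedtuple

# Toy stand-in for zipline's stock-dividend object.
StockDividend = namedtuple('StockDividend', ['payment_asset', 'ratio'])

shares_held = 103                  # position size on the ex date
div = StockDividend(payment_asset='SIBLING', ratio=0.1)

# One new share per ten held; the fractional remainder is floored away.
payment = {
    'payment_asset': div.payment_asset,
    'share_count': np.floor(shares_held * float(div.ratio)),
}
print(payment)   # {'payment_asset': 'SIBLING', 'share_count': 10.0}
```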
25,749 | quantopian/zipline | zipline/finance/position.py | Position.handle_split | def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash | python | def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash | [
"def",
"handle_split",
"(",
"self",
",",
"asset",
",",
"ratio",
")",
":",
"if",
"self",
".",
"asset",
"!=",
"asset",
":",
"raise",
"Exception",
"(",
"\"updating split with the wrong asset!\"",
")",
"# adjust the # of shares by the ratio",
"# (if we had 100 shares, and the ratio is 3,",
"# we now have 33 shares)",
"# (old_share_count / ratio = new_share_count)",
"# (old_price * ratio = new_price)",
"# e.g., 33.333",
"raw_share_count",
"=",
"self",
".",
"amount",
"/",
"float",
"(",
"ratio",
")",
"# e.g., 33",
"full_share_count",
"=",
"np",
".",
"floor",
"(",
"raw_share_count",
")",
"# e.g., 0.333",
"fractional_share_count",
"=",
"raw_share_count",
"-",
"full_share_count",
"# adjust the cost basis to the nearest cent, e.g., 60.0",
"new_cost_basis",
"=",
"round",
"(",
"self",
".",
"cost_basis",
"*",
"ratio",
",",
"2",
")",
"self",
".",
"cost_basis",
"=",
"new_cost_basis",
"self",
".",
"amount",
"=",
"full_share_count",
"return_cash",
"=",
"round",
"(",
"float",
"(",
"fractional_share_count",
"*",
"new_cost_basis",
")",
",",
"2",
")",
"log",
".",
"info",
"(",
"\"after split: \"",
"+",
"str",
"(",
"self",
")",
")",
"log",
".",
"info",
"(",
"\"returning cash: \"",
"+",
"str",
"(",
"return_cash",
")",
")",
"# return the leftover cash, which will be converted into cash",
"# (rounded to the nearest cent)",
"return",
"return_cash"
] | Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash. | [
"Update",
"the",
"position",
"by",
"the",
"split",
"ratio",
"and",
"return",
"the",
"resulting",
"fractional",
"share",
"that",
"will",
"be",
"converted",
"into",
"cash",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/position.py#L91-L129 |
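The comments above walk through the split arithmetic with 100 shares and a ratio of 3. The numbers check out end to end, assuming a $20.00 cost basis (a figure not stated in the record):

```python
import numpy as np

# 100 shares, split ratio 3, assumed $20.00 cost basis.
amount, cost_basis, ratio = 100, 20.00, 3.0

raw_share_count = amount / ratio                   # 33.333...
full_share_count = np.floor(raw_share_count)       # 33.0
fractional = raw_share_count - full_share_count    # 0.333...

new_cost_basis = round(cost_basis * ratio, 2)      # 60.0
return_cash = round(float(fractional * new_cost_basis), 2)  # 20.0

# Position value is preserved: 100 * 20 == 33 * 60 + 20.
assert amount * cost_basis == full_share_count * new_cost_basis + return_cash
print(full_share_count, new_cost_basis, return_cash)  # 33.0 60.0 20.0
```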
25,750 | quantopian/zipline | zipline/utils/deprecate.py | deprecated | def deprecated(msg=None, stacklevel=2):
"""
Used to mark a function as deprecated.
Parameters
----------
msg : str
The message to display in the deprecation warning.
stacklevel : int
How far up the stack the warning needs to go, before
showing the relevant calling lines.
Examples
--------
@deprecated(msg='function_a is deprecated! Use function_b instead.')
def function_a(*args, **kwargs):
"""
def deprecated_dec(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
warnings.warn(
msg or "Function %s is deprecated." % fn.__name__,
category=DeprecationWarning,
stacklevel=stacklevel
)
return fn(*args, **kwargs)
return wrapper
return deprecated_dec | python | def deprecated(msg=None, stacklevel=2):
"""
Used to mark a function as deprecated.
Parameters
----------
msg : str
The message to display in the deprecation warning.
stacklevel : int
How far up the stack the warning needs to go, before
showing the relevant calling lines.
Examples
--------
@deprecated(msg='function_a is deprecated! Use function_b instead.')
def function_a(*args, **kwargs):
"""
def deprecated_dec(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
warnings.warn(
msg or "Function %s is deprecated." % fn.__name__,
category=DeprecationWarning,
stacklevel=stacklevel
)
return fn(*args, **kwargs)
return wrapper
return deprecated_dec | [
"def",
"deprecated",
"(",
"msg",
"=",
"None",
",",
"stacklevel",
"=",
"2",
")",
":",
"def",
"deprecated_dec",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"msg",
"or",
"\"Function %s is deprecated.\"",
"%",
"fn",
".",
"__name__",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"stacklevel",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"deprecated_dec"
] | Used to mark a function as deprecated.
Parameters
----------
msg : str
The message to display in the deprecation warning.
stacklevel : int
How far up the stack the warning needs to go, before
showing the relevant calling lines.
Examples
--------
@deprecated(msg='function_a is deprecated! Use function_b instead.')
def function_a(*args, **kwargs): | [
"Used",
"to",
"mark",
"a",
"function",
"as",
"deprecated",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/deprecate.py#L20-L47 |
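The decorator in this record is short enough to run verbatim. Below it is exercised with `warnings.catch_warnings` to confirm both the pass-through return value and the emitted `DeprecationWarning`.

```python
import warnings
from functools import wraps

def deprecated(msg=None, stacklevel=2):
    def deprecated_dec(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                msg or "Function %s is deprecated." % fn.__name__,
                category=DeprecationWarning,
                stacklevel=stacklevel,
            )
            return fn(*args, **kwargs)
        return wrapper
    return deprecated_dec

@deprecated(msg='function_a is deprecated! Use function_b instead.')
def function_a(x):
    return x + 1

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    print(function_a(1))              # 2 -- the wrapped function still runs
print(caught[0].category.__name__)    # DeprecationWarning
print(caught[0].message)              # function_a is deprecated! Use ...
```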
25,751 | quantopian/zipline | zipline/data/history_loader.py | HistoryCompatibleUSEquityAdjustmentReader._get_adjustments_in_range | def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
For the use of AdjustedArrayWindow in the loader, which looks back
from the current simulation time to a window of data, the dictionary is
structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
The assets for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != 'volume':
mergers = self._adjustments_reader.get_adjustments_for_sid(
'mergers', sid)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
m[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
'dividends', sid)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
d[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid(
'splits', sid)
for s in splits:
dt = s[0]
if start < dt <= end:
if field == 'volume':
ratio = 1.0 / s[1]
else:
ratio = s[1]
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
ratio)
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
return adjs | python | def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
For the use of AdjustedArrayWindow in the loader, which looks back
from the current simulation time to a window of data, the dictionary is
structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
The assets for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != 'volume':
mergers = self._adjustments_reader.get_adjustments_for_sid(
'mergers', sid)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
m[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
'dividends', sid)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
d[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid(
'splits', sid)
for s in splits:
dt = s[0]
if start < dt <= end:
if field == 'volume':
ratio = 1.0 / s[1]
else:
ratio = s[1]
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
ratio)
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
return adjs | [
"def",
"_get_adjustments_in_range",
"(",
"self",
",",
"asset",
",",
"dts",
",",
"field",
")",
":",
"sid",
"=",
"int",
"(",
"asset",
")",
"start",
"=",
"normalize_date",
"(",
"dts",
"[",
"0",
"]",
")",
"end",
"=",
"normalize_date",
"(",
"dts",
"[",
"-",
"1",
"]",
")",
"adjs",
"=",
"{",
"}",
"if",
"field",
"!=",
"'volume'",
":",
"mergers",
"=",
"self",
".",
"_adjustments_reader",
".",
"get_adjustments_for_sid",
"(",
"'mergers'",
",",
"sid",
")",
"for",
"m",
"in",
"mergers",
":",
"dt",
"=",
"m",
"[",
"0",
"]",
"if",
"start",
"<",
"dt",
"<=",
"end",
":",
"end_loc",
"=",
"dts",
".",
"searchsorted",
"(",
"dt",
")",
"adj_loc",
"=",
"end_loc",
"mult",
"=",
"Float64Multiply",
"(",
"0",
",",
"end_loc",
"-",
"1",
",",
"0",
",",
"0",
",",
"m",
"[",
"1",
"]",
")",
"try",
":",
"adjs",
"[",
"adj_loc",
"]",
".",
"append",
"(",
"mult",
")",
"except",
"KeyError",
":",
"adjs",
"[",
"adj_loc",
"]",
"=",
"[",
"mult",
"]",
"divs",
"=",
"self",
".",
"_adjustments_reader",
".",
"get_adjustments_for_sid",
"(",
"'dividends'",
",",
"sid",
")",
"for",
"d",
"in",
"divs",
":",
"dt",
"=",
"d",
"[",
"0",
"]",
"if",
"start",
"<",
"dt",
"<=",
"end",
":",
"end_loc",
"=",
"dts",
".",
"searchsorted",
"(",
"dt",
")",
"adj_loc",
"=",
"end_loc",
"mult",
"=",
"Float64Multiply",
"(",
"0",
",",
"end_loc",
"-",
"1",
",",
"0",
",",
"0",
",",
"d",
"[",
"1",
"]",
")",
"try",
":",
"adjs",
"[",
"adj_loc",
"]",
".",
"append",
"(",
"mult",
")",
"except",
"KeyError",
":",
"adjs",
"[",
"adj_loc",
"]",
"=",
"[",
"mult",
"]",
"splits",
"=",
"self",
".",
"_adjustments_reader",
".",
"get_adjustments_for_sid",
"(",
"'splits'",
",",
"sid",
")",
"for",
"s",
"in",
"splits",
":",
"dt",
"=",
"s",
"[",
"0",
"]",
"if",
"start",
"<",
"dt",
"<=",
"end",
":",
"if",
"field",
"==",
"'volume'",
":",
"ratio",
"=",
"1.0",
"/",
"s",
"[",
"1",
"]",
"else",
":",
"ratio",
"=",
"s",
"[",
"1",
"]",
"end_loc",
"=",
"dts",
".",
"searchsorted",
"(",
"dt",
")",
"adj_loc",
"=",
"end_loc",
"mult",
"=",
"Float64Multiply",
"(",
"0",
",",
"end_loc",
"-",
"1",
",",
"0",
",",
"0",
",",
"ratio",
")",
"try",
":",
"adjs",
"[",
"adj_loc",
"]",
".",
"append",
"(",
"mult",
")",
"except",
"KeyError",
":",
"adjs",
"[",
"adj_loc",
"]",
"=",
"[",
"mult",
"]",
"return",
"adjs"
] | Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
For the use of AdjustedArrayWindow in the loader, which looks back
from the current simulation time to a window of data, the dictionary is
structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
The assets for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply | [
"Get",
"the",
"Float64Multiply",
"objects",
"to",
"pass",
"to",
"an",
"AdjustedArrayWindow",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/history_loader.py#L65-L151 |
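A condensed sketch of the split branch above, with a namedtuple standing in for zipline's `Float64Multiply` C extension. The key moves are `searchsorted` to find the row of the adjustment action, and a multiply spanning rows 0 through `end_loc - 1` so every day before the event is adjusted; the calendar and split data below are invented.

```python
from collections import namedtuple
import pandas as pd

# Stand-in for zipline's Float64Multiply: multiply rows
# first_row..last_row (cols first_col..last_col) of a window by `value`.
Float64Multiply = namedtuple(
    'Float64Multiply',
    ['first_row', 'last_row', 'first_col', 'last_col', 'value'],
)

dts = pd.date_range('2016-05-23', periods=5)    # the window's calendar
splits = [(pd.Timestamp('2016-05-27'), 0.5)]    # (effective dt, ratio)

adjs = {}
for dt, ratio in splits:
    if dts[0] < dt <= dts[-1]:
        end_loc = int(dts.searchsorted(dt))     # 4: row of the action
        # Adjust every row strictly before the action: rows 0..end_loc-1.
        mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio)
        adjs.setdefault(end_loc, []).append(mult)

print(adjs)
# {4: [Float64Multiply(first_row=0, last_row=3, first_col=0, last_col=0,
#      value=0.5)]}
```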
25,752 | quantopian/zipline | zipline/data/history_loader.py | HistoryLoader.history | def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
) | python | def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
) | [
"def",
"history",
"(",
"self",
",",
"assets",
",",
"dts",
",",
"field",
",",
"is_perspective_after",
")",
":",
"block",
"=",
"self",
".",
"_ensure_sliding_windows",
"(",
"assets",
",",
"dts",
",",
"field",
",",
"is_perspective_after",
")",
"end_ix",
"=",
"self",
".",
"_calendar",
".",
"searchsorted",
"(",
"dts",
"[",
"-",
"1",
"]",
")",
"return",
"concatenate",
"(",
"[",
"window",
".",
"get",
"(",
"end_ix",
")",
"for",
"window",
"in",
"block",
"]",
",",
"axis",
"=",
"1",
",",
")"
] | A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets)) | [
"A",
"window",
"of",
"pricing",
"data",
"with",
"adjustments",
"applied",
"assuming",
"that",
"the",
"end",
"of",
"the",
"window",
"is",
"the",
"day",
"before",
"the",
"current",
"simulation",
"time",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/history_loader.py#L471-L555 |
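The two docstring tables above reduce to simple arithmetic. Reproducing them numerically (the 5.25 close on 05-27 is the new post-split market price, not an adjusted value):

```python
import numpy as np

raw = np.array([10.10, 10.20, 10.30, 10.40])   # closes for 05-23 .. 05-26
ratio = 0.5                                    # split effective 05-27

# is_perspective_after=True (minute simulation at 05-27 9:31): the split
# is already in effect, so every bar in the four-day window is adjusted.
print(np.round(raw * ratio, 2))                # [5.05 5.1  5.15 5.2 ]

# is_perspective_after=False (daily simulation on 05-27): the window
# includes 05-27 itself, whose raw close (5.25) is already post-split,
# so the ratio is applied to every bar except the last.
print(np.append(np.round(raw * ratio, 2), 5.25))  # [5.05 5.1  5.15 5.2  5.25]
```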
25,753 | quantopian/zipline | zipline/sources/requests_csv.py | PandasCSV._lookup_unconflicted_symbol | def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(
uppered,
as_of_date=None,
country_code=self.country_code,
)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan | python | def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(
uppered,
as_of_date=None,
country_code=self.country_code,
)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan | [
"def",
"_lookup_unconflicted_symbol",
"(",
"self",
",",
"symbol",
")",
":",
"try",
":",
"uppered",
"=",
"symbol",
".",
"upper",
"(",
")",
"except",
"AttributeError",
":",
"# The mapping fails because symbol was a non-string",
"return",
"numpy",
".",
"nan",
"try",
":",
"return",
"self",
".",
"finder",
".",
"lookup_symbol",
"(",
"uppered",
",",
"as_of_date",
"=",
"None",
",",
"country_code",
"=",
"self",
".",
"country_code",
",",
")",
"except",
"MultipleSymbolsFound",
":",
"# Fill conflicted entries with zeros to mark that they need to be",
"# resolved by date.",
"return",
"0",
"except",
"SymbolNotFound",
":",
"# Fill not found entries with nans.",
"return",
"numpy",
".",
"nan"
] | Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN. | [
"Attempt",
"to",
"find",
"a",
"unique",
"asset",
"whose",
"symbol",
"is",
"the",
"given",
"string",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/sources/requests_csv.py#L262-L288 |
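The NaN/0 sentinel convention above (NaN for not found or non-string, 0 for conflicts to be resolved later by date) can be shown with a toy symbol table in place of the asset finder; the table and function below are hypothetical.

```python
import numpy

# Toy symbol table standing in for the asset finder; names are invented.
_SYMBOLS = {'AAPL': 1, 'TWICE': [2, 3]}   # 'TWICE' held by two sids

def lookup_unconflicted_symbol(symbol):
    try:
        uppered = symbol.upper()
    except AttributeError:
        return numpy.nan      # non-string, e.g. a float parsed from a CSV
    hits = _SYMBOLS.get(uppered)
    if hits is None:
        return numpy.nan      # analogue of SymbolNotFound
    if isinstance(hits, list):
        return 0              # analogue of MultipleSymbolsFound
    return hits

print(lookup_unconflicted_symbol('aapl'))    # 1
print(lookup_unconflicted_symbol('twice'))   # 0
print(lookup_unconflicted_symbol(3.14))      # nan
print(lookup_unconflicted_symbol('none'))    # nan
```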
25,754 | quantopian/zipline | zipline/gens/tradesimulation.py | AlgorithmSimulator._cleanup_expired_assets | def _cleanup_expired_assets(self, dt, position_assets):
"""
Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders for assets at or past their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date.
"""
algo = self.algo
def past_auto_close_date(asset):
acd = asset.auto_close_date
return acd is not None and acd <= dt
# Remove positions in any sids that have reached their auto_close date.
assets_to_clear = \
[asset for asset in position_assets if past_auto_close_date(asset)]
metrics_tracker = algo.metrics_tracker
data_portal = self.data_portal
for asset in assets_to_clear:
metrics_tracker.process_close_position(asset, dt, data_portal)
# Remove open orders for any sids that have reached their auto close
# date. These orders get processed immediately because otherwise they
# would not be processed until the first bar of the next day.
blotter = algo.blotter
assets_to_cancel = [
asset for asset in blotter.open_orders
if past_auto_close_date(asset)
]
for asset in assets_to_cancel:
blotter.cancel_all_orders_for_asset(asset)
# Make a copy here so that we are not modifying the list that is being
# iterated over.
for order in copy(blotter.new_orders):
if order.status == ORDER_STATUS.CANCELLED:
metrics_tracker.process_order(order)
blotter.new_orders.remove(order) | python | def _cleanup_expired_assets(self, dt, position_assets):
"""
Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders for assets at or past their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date.
"""
algo = self.algo
def past_auto_close_date(asset):
acd = asset.auto_close_date
return acd is not None and acd <= dt
# Remove positions in any sids that have reached their auto_close date.
assets_to_clear = \
[asset for asset in position_assets if past_auto_close_date(asset)]
metrics_tracker = algo.metrics_tracker
data_portal = self.data_portal
for asset in assets_to_clear:
metrics_tracker.process_close_position(asset, dt, data_portal)
# Remove open orders for any sids that have reached their auto close
# date. These orders get processed immediately because otherwise they
# would not be processed until the first bar of the next day.
blotter = algo.blotter
assets_to_cancel = [
asset for asset in blotter.open_orders
if past_auto_close_date(asset)
]
for asset in assets_to_cancel:
blotter.cancel_all_orders_for_asset(asset)
# Make a copy here so that we are not modifying the list that is being
# iterated over.
for order in copy(blotter.new_orders):
if order.status == ORDER_STATUS.CANCELLED:
metrics_tracker.process_order(order)
blotter.new_orders.remove(order) | [
"def",
"_cleanup_expired_assets",
"(",
"self",
",",
"dt",
",",
"position_assets",
")",
":",
"algo",
"=",
"self",
".",
"algo",
"def",
"past_auto_close_date",
"(",
"asset",
")",
":",
"acd",
"=",
"asset",
".",
"auto_close_date",
"return",
"acd",
"is",
"not",
"None",
"and",
"acd",
"<=",
"dt",
"# Remove positions in any sids that have reached their auto_close date.",
"assets_to_clear",
"=",
"[",
"asset",
"for",
"asset",
"in",
"position_assets",
"if",
"past_auto_close_date",
"(",
"asset",
")",
"]",
"metrics_tracker",
"=",
"algo",
".",
"metrics_tracker",
"data_portal",
"=",
"self",
".",
"data_portal",
"for",
"asset",
"in",
"assets_to_clear",
":",
"metrics_tracker",
".",
"process_close_position",
"(",
"asset",
",",
"dt",
",",
"data_portal",
")",
"# Remove open orders for any sids that have reached their auto close",
"# date. These orders get processed immediately because otherwise they",
"# would not be processed until the first bar of the next day.",
"blotter",
"=",
"algo",
".",
"blotter",
"assets_to_cancel",
"=",
"[",
"asset",
"for",
"asset",
"in",
"blotter",
".",
"open_orders",
"if",
"past_auto_close_date",
"(",
"asset",
")",
"]",
"for",
"asset",
"in",
"assets_to_cancel",
":",
"blotter",
".",
"cancel_all_orders_for_asset",
"(",
"asset",
")",
"# Make a copy here so that we are not modifying the list that is being",
"# iterated over.",
"for",
"order",
"in",
"copy",
"(",
"blotter",
".",
"new_orders",
")",
":",
"if",
"order",
".",
"status",
"==",
"ORDER_STATUS",
".",
"CANCELLED",
":",
"metrics_tracker",
".",
"process_order",
"(",
"order",
")",
"blotter",
".",
"new_orders",
".",
"remove",
"(",
"order",
")"
] | Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders for assets at or past their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date. | [
"Clear",
"out",
"any",
"assets",
"that",
"have",
"expired",
"before",
"starting",
"a",
"new",
"sim",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/tradesimulation.py#L238-L281 |
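The `past_auto_close_date` predicate above is the heart of both cleanup passes. A standalone check with a toy `Asset` namedtuple, modeling only the `sid` and `auto_close_date` fields:

```python
from collections import namedtuple
import pandas as pd

Asset = namedtuple('Asset', ['sid', 'auto_close_date'])

dt = pd.Timestamp('2016-06-01')
positions = [
    Asset(1, pd.Timestamp('2016-05-31')),   # already past auto-close
    Asset(2, pd.Timestamp('2016-06-15')),   # still alive
    Asset(3, None),                         # never auto-closes
]

def past_auto_close_date(asset):
    acd = asset.auto_close_date
    return acd is not None and acd <= dt

print([a.sid for a in positions if past_auto_close_date(a)])   # [1]
```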
25,755 | quantopian/zipline | zipline/data/adjustments.py | SQLiteAdjustmentReader.load_adjustments | def load_adjustments(self,
dates,
assets,
should_include_splits,
should_include_mergers,
should_include_dividends,
adjustment_type):
"""
Load collection of Adjustment objects from underlying adjustments db.
Parameters
----------
dates : pd.DatetimeIndex
Dates for which adjustments are needed.
assets : pd.Int64Index
Assets for which adjustments are needed.
should_include_splits : bool
Whether split adjustments should be included.
should_include_mergers : bool
Whether merger adjustments should be included.
should_include_dividends : bool
Whether dividend adjustments should be included.
adjustment_type : str
Whether price adjustments, volume adjustments, or both, should be
included in the output.
Returns
-------
adjustments : dict[str -> dict[int -> Adjustment]]
A dictionary containing price and/or volume adjustment mappings
from index to adjustment objects to apply at that index.
"""
return load_adjustments_from_sqlite(
self.conn,
dates,
assets,
should_include_splits,
should_include_mergers,
should_include_dividends,
adjustment_type,
) | python | def load_adjustments(self,
dates,
assets,
should_include_splits,
should_include_mergers,
should_include_dividends,
adjustment_type):
"""
Load collection of Adjustment objects from underlying adjustments db.
Parameters
----------
dates : pd.DatetimeIndex
Dates for which adjustments are needed.
assets : pd.Int64Index
Assets for which adjustments are needed.
should_include_splits : bool
Whether split adjustments should be included.
should_include_mergers : bool
Whether merger adjustments should be included.
should_include_dividends : bool
Whether dividend adjustments should be included.
adjustment_type : str
Whether price adjustments, volume adjustments, or both, should be
included in the output.
Returns
-------
adjustments : dict[str -> dict[int -> Adjustment]]
A dictionary containing price and/or volume adjustment mappings
from index to adjustment objects to apply at that index.
"""
return load_adjustments_from_sqlite(
self.conn,
dates,
assets,
should_include_splits,
should_include_mergers,
should_include_dividends,
adjustment_type,
) | [
"def",
"load_adjustments",
"(",
"self",
",",
"dates",
",",
"assets",
",",
"should_include_splits",
",",
"should_include_mergers",
",",
"should_include_dividends",
",",
"adjustment_type",
")",
":",
"return",
"load_adjustments_from_sqlite",
"(",
"self",
".",
"conn",
",",
"dates",
",",
"assets",
",",
"should_include_splits",
",",
"should_include_mergers",
",",
"should_include_dividends",
",",
"adjustment_type",
",",
")"
] | Load collection of Adjustment objects from underlying adjustments db.
Parameters
----------
dates : pd.DatetimeIndex
Dates for which adjustments are needed.
assets : pd.Int64Index
Assets for which adjustments are needed.
should_include_splits : bool
Whether split adjustments should be included.
should_include_mergers : bool
Whether merger adjustments should be included.
should_include_dividends : bool
Whether dividend adjustments should be included.
adjustment_type : str
Whether price adjustments, volume adjustments, or both, should be
included in the output.
Returns
-------
adjustments : dict[str -> dict[int -> Adjustment]]
A dictionary containing price and/or volume adjustment mappings
from index to adjustment objects to apply at that index. | [
"Load",
"collection",
"of",
"Adjustment",
"objects",
"from",
"underlying",
"adjustments",
"db",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/adjustments.py#L142-L182 |
25,756 | quantopian/zipline | zipline/data/adjustments.py | SQLiteAdjustmentReader.unpack_db_to_component_dfs | def unpack_db_to_component_dfs(self, convert_dates=False):
"""Returns the set of known tables in the adjustments file in DataFrame
form.
Parameters
----------
convert_dates : bool, optional
By default, dates are returned in seconds since EPOCH. If
convert_dates is True, all ints in date columns will be converted
to datetimes.
Returns
-------
dfs : dict{str->DataFrame}
Dictionary which maps table name to the corresponding DataFrame
version of the table, where all date columns have been coerced back
from int to datetime.
"""
return {
t_name: self.get_df_from_table(t_name, convert_dates)
for t_name in self._datetime_int_cols
} | python | def unpack_db_to_component_dfs(self, convert_dates=False):
"""Returns the set of known tables in the adjustments file in DataFrame
form.
Parameters
----------
convert_dates : bool, optional
By default, dates are returned in seconds since EPOCH. If
convert_dates is True, all ints in date columns will be converted
to datetimes.
Returns
-------
dfs : dict{str->DataFrame}
Dictionary which maps table name to the corresponding DataFrame
version of the table, where all date columns have been coerced back
from int to datetime.
"""
return {
t_name: self.get_df_from_table(t_name, convert_dates)
for t_name in self._datetime_int_cols
} | [
"def",
"unpack_db_to_component_dfs",
"(",
"self",
",",
"convert_dates",
"=",
"False",
")",
":",
"return",
"{",
"t_name",
":",
"self",
".",
"get_df_from_table",
"(",
"t_name",
",",
"convert_dates",
")",
"for",
"t_name",
"in",
"self",
".",
"_datetime_int_cols",
"}"
] | Returns the set of known tables in the adjustments file in DataFrame
form.
Parameters
----------
convert_dates : bool, optional
By default, dates are returned in seconds since EPOCH. If
convert_dates is True, all ints in date columns will be converted
to datetimes.
Returns
-------
dfs : dict{str->DataFrame}
Dictionary which maps table name to the corresponding DataFrame
version of the table, where all date columns have been coerced back
from int to datetime. | [
"Returns",
"the",
"set",
"of",
"known",
"tables",
"in",
"the",
"adjustments",
"file",
"in",
"DataFrame",
"form",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/adjustments.py#L268-L289 |
25,757 | quantopian/zipline | zipline/data/adjustments.py | SQLiteAdjustmentReader._df_dtypes | def _df_dtypes(self, table_name, convert_dates):
"""Get dtypes to use when unpacking sqlite tables as dataframes.
"""
out = self._raw_table_dtypes[table_name]
if convert_dates:
out = out.copy()
for date_column in self._datetime_int_cols[table_name]:
out[date_column] = datetime64ns_dtype
return out | python | def _df_dtypes(self, table_name, convert_dates):
"""Get dtypes to use when unpacking sqlite tables as dataframes.
"""
out = self._raw_table_dtypes[table_name]
if convert_dates:
out = out.copy()
for date_column in self._datetime_int_cols[table_name]:
out[date_column] = datetime64ns_dtype
return out | [
"def",
"_df_dtypes",
"(",
"self",
",",
"table_name",
",",
"convert_dates",
")",
":",
"out",
"=",
"self",
".",
"_raw_table_dtypes",
"[",
"table_name",
"]",
"if",
"convert_dates",
":",
"out",
"=",
"out",
".",
"copy",
"(",
")",
"for",
"date_column",
"in",
"self",
".",
"_datetime_int_cols",
"[",
"table_name",
"]",
":",
"out",
"[",
"date_column",
"]",
"=",
"datetime64ns_dtype",
"return",
"out"
] | Get dtypes to use when unpacking sqlite tables as dataframes. | [
"Get",
"dtypes",
"to",
"use",
"when",
"unpacking",
"sqlite",
"tables",
"as",
"dataframes",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/adjustments.py#L326-L335 |
25,758 | quantopian/zipline | zipline/data/adjustments.py | SQLiteAdjustmentWriter.calc_dividend_ratios | def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to the upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None or dividends.empty:
return pd.DataFrame(np.array(
[],
dtype=[
('sid', uint64_dtype),
('effective_date', uint32_dtype),
('ratio', float64_dtype),
],
))
pricing_reader = self._equity_daily_bar_reader
input_sids = dividends.sid.values
unique_sids, sids_ix = np.unique(input_sids, return_inverse=True)
dates = pricing_reader.sessions.values
close, = pricing_reader.load_raw_arrays(
['close'],
pd.Timestamp(dates[0], tz='UTC'),
pd.Timestamp(dates[-1], tz='UTC'),
unique_sids,
)
date_ix = np.searchsorted(dates, dividends.ex_date.values)
mask = date_ix > 0
date_ix = date_ix[mask]
sids_ix = sids_ix[mask]
input_dates = dividends.ex_date.values[mask]
# subtract one day to get the close on the day prior to the ex_date
previous_close = close[date_ix - 1, sids_ix]
input_sids = input_sids[mask]
amount = dividends.amount.values[mask]
ratio = 1.0 - amount / previous_close
non_nan_ratio_mask = ~np.isnan(ratio)
for ix in np.flatnonzero(~non_nan_ratio_mask):
log.warn(
"Couldn't compute ratio for dividend"
" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}",
sid=input_sids[ix],
ex_date=pd.Timestamp(input_dates[ix]),
amount=amount[ix],
)
positive_ratio_mask = ratio > 0
for ix in np.flatnonzero(~positive_ratio_mask & non_nan_ratio_mask):
log.warn(
"Dividend ratio <= 0 for dividend"
" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}",
sid=input_sids[ix],
ex_date=pd.Timestamp(input_dates[ix]),
amount=amount[ix],
)
valid_ratio_mask = non_nan_ratio_mask & positive_ratio_mask
return pd.DataFrame({
'sid': input_sids[valid_ratio_mask],
'effective_date': input_dates[valid_ratio_mask],
'ratio': ratio[valid_ratio_mask],
}) | python | def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to the upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None or dividends.empty:
return pd.DataFrame(np.array(
[],
dtype=[
('sid', uint64_dtype),
('effective_date', uint32_dtype),
('ratio', float64_dtype),
],
))
pricing_reader = self._equity_daily_bar_reader
input_sids = dividends.sid.values
unique_sids, sids_ix = np.unique(input_sids, return_inverse=True)
dates = pricing_reader.sessions.values
close, = pricing_reader.load_raw_arrays(
['close'],
pd.Timestamp(dates[0], tz='UTC'),
pd.Timestamp(dates[-1], tz='UTC'),
unique_sids,
)
date_ix = np.searchsorted(dates, dividends.ex_date.values)
mask = date_ix > 0
date_ix = date_ix[mask]
sids_ix = sids_ix[mask]
input_dates = dividends.ex_date.values[mask]
# subtract one day to get the close on the day prior to the ex_date
previous_close = close[date_ix - 1, sids_ix]
input_sids = input_sids[mask]
amount = dividends.amount.values[mask]
ratio = 1.0 - amount / previous_close
non_nan_ratio_mask = ~np.isnan(ratio)
for ix in np.flatnonzero(~non_nan_ratio_mask):
log.warn(
"Couldn't compute ratio for dividend"
" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}",
sid=input_sids[ix],
ex_date=pd.Timestamp(input_dates[ix]),
amount=amount[ix],
)
positive_ratio_mask = ratio > 0
for ix in np.flatnonzero(~positive_ratio_mask & non_nan_ratio_mask):
log.warn(
"Dividend ratio <= 0 for dividend"
" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}",
sid=input_sids[ix],
ex_date=pd.Timestamp(input_dates[ix]),
amount=amount[ix],
)
valid_ratio_mask = non_nan_ratio_mask & positive_ratio_mask
return pd.DataFrame({
'sid': input_sids[valid_ratio_mask],
'effective_date': input_dates[valid_ratio_mask],
'ratio': ratio[valid_ratio_mask],
}) | [
"def",
"calc_dividend_ratios",
"(",
"self",
",",
"dividends",
")",
":",
"if",
"dividends",
"is",
"None",
"or",
"dividends",
".",
"empty",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"[",
"(",
"'sid'",
",",
"uint64_dtype",
")",
",",
"(",
"'effective_date'",
",",
"uint32_dtype",
")",
",",
"(",
"'ratio'",
",",
"float64_dtype",
")",
",",
"]",
",",
")",
")",
"pricing_reader",
"=",
"self",
".",
"_equity_daily_bar_reader",
"input_sids",
"=",
"dividends",
".",
"sid",
".",
"values",
"unique_sids",
",",
"sids_ix",
"=",
"np",
".",
"unique",
"(",
"input_sids",
",",
"return_inverse",
"=",
"True",
")",
"dates",
"=",
"pricing_reader",
".",
"sessions",
".",
"values",
"close",
",",
"=",
"pricing_reader",
".",
"load_raw_arrays",
"(",
"[",
"'close'",
"]",
",",
"pd",
".",
"Timestamp",
"(",
"dates",
"[",
"0",
"]",
",",
"tz",
"=",
"'UTC'",
")",
",",
"pd",
".",
"Timestamp",
"(",
"dates",
"[",
"-",
"1",
"]",
",",
"tz",
"=",
"'UTC'",
")",
",",
"unique_sids",
",",
")",
"date_ix",
"=",
"np",
".",
"searchsorted",
"(",
"dates",
",",
"dividends",
".",
"ex_date",
".",
"values",
")",
"mask",
"=",
"date_ix",
">",
"0",
"date_ix",
"=",
"date_ix",
"[",
"mask",
"]",
"sids_ix",
"=",
"sids_ix",
"[",
"mask",
"]",
"input_dates",
"=",
"dividends",
".",
"ex_date",
".",
"values",
"[",
"mask",
"]",
"# subtract one day to get the close on the day prior to the merger",
"previous_close",
"=",
"close",
"[",
"date_ix",
"-",
"1",
",",
"sids_ix",
"]",
"input_sids",
"=",
"input_sids",
"[",
"mask",
"]",
"amount",
"=",
"dividends",
".",
"amount",
".",
"values",
"[",
"mask",
"]",
"ratio",
"=",
"1.0",
"-",
"amount",
"/",
"previous_close",
"non_nan_ratio_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"ratio",
")",
"for",
"ix",
"in",
"np",
".",
"flatnonzero",
"(",
"~",
"non_nan_ratio_mask",
")",
":",
"log",
".",
"warn",
"(",
"\"Couldn't compute ratio for dividend\"",
"\" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}\"",
",",
"sid",
"=",
"input_sids",
"[",
"ix",
"]",
",",
"ex_date",
"=",
"pd",
".",
"Timestamp",
"(",
"input_dates",
"[",
"ix",
"]",
")",
",",
"amount",
"=",
"amount",
"[",
"ix",
"]",
",",
")",
"positive_ratio_mask",
"=",
"ratio",
">",
"0",
"for",
"ix",
"in",
"np",
".",
"flatnonzero",
"(",
"~",
"positive_ratio_mask",
"&",
"non_nan_ratio_mask",
")",
":",
"log",
".",
"warn",
"(",
"\"Dividend ratio <= 0 for dividend\"",
"\" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}\"",
",",
"sid",
"=",
"input_sids",
"[",
"ix",
"]",
",",
"ex_date",
"=",
"pd",
".",
"Timestamp",
"(",
"input_dates",
"[",
"ix",
"]",
")",
",",
"amount",
"=",
"amount",
"[",
"ix",
"]",
",",
")",
"valid_ratio_mask",
"=",
"non_nan_ratio_mask",
"&",
"positive_ratio_mask",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'sid'",
":",
"input_sids",
"[",
"valid_ratio_mask",
"]",
",",
"'effective_date'",
":",
"input_dates",
"[",
"valid_ratio_mask",
"]",
",",
"'ratio'",
":",
"ratio",
"[",
"valid_ratio_mask",
"]",
",",
"}",
")"
] | Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data. | [
"Calculate",
"the",
"ratios",
"to",
"apply",
"to",
"equities",
"when",
"looking",
"back",
"at",
"pricing",
"history",
"so",
"that",
"the",
"price",
"is",
"smoothed",
"over",
"the",
"ex_date",
"when",
"the",
"market",
"adjusts",
"to",
"the",
"change",
"in",
"equity",
"value",
"due",
"to",
"upcoming",
"dividend",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/adjustments.py#L456-L530 |
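A tiny numeric check of the ratio formula and validity masks used above, with made-up closes and amounts:

```python
# ratio = 1.0 - amount / close_on_day_before_ex_date, vectorized over rows.
import numpy as np

previous_close = np.array([100.0, 50.0, np.nan])
amount = np.array([1.0, 2.5, 0.10])
ratio = 1.0 - amount / previous_close

valid = ~np.isnan(ratio) & (ratio > 0)   # mirrors the masks applied above
print(ratio[valid])                      # [0.99 0.95]; the NaN row is dropped
```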
25,759 | quantopian/zipline | zipline/data/adjustments.py | SQLiteAdjustmentWriter.write_dividend_data | def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios) | python | def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios) | [
"def",
"write_dividend_data",
"(",
"self",
",",
"dividends",
",",
"stock_dividends",
"=",
"None",
")",
":",
"# First write the dividend payouts.",
"self",
".",
"_write_dividends",
"(",
"dividends",
")",
"self",
".",
"_write_stock_dividends",
"(",
"stock_dividends",
")",
"# Second from the dividend payouts, calculate ratios.",
"dividend_ratios",
"=",
"self",
".",
"calc_dividend_ratios",
"(",
"dividends",
")",
"self",
".",
"write_frame",
"(",
"'dividends'",
",",
"dividend_ratios",
")"
] | Write both dividend payouts and the derived price adjustment ratios. | [
"Write",
"both",
"dividend",
"payouts",
"and",
"the",
"derived",
"price",
"adjustment",
"ratios",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/adjustments.py#L570-L581 |
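A sketch of the payouts frame this method consumes; the column names follow the docstrings in this file, while the values and the writer itself are illustrative.

```python
import pandas as pd

dividends = pd.DataFrame({
    'sid': [1],
    'ex_date': [pd.Timestamp('2016-01-06')],
    'declared_date': [pd.Timestamp('2015-12-01')],
    'record_date': [pd.Timestamp('2016-01-08')],
    'pay_date': [pd.Timestamp('2016-01-15')],
    'amount': [0.50],
})
# writer.write_dividend_data(dividends)  # writer construction not shown here
```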
25,760 | quantopian/zipline | zipline/data/adjustments.py | SQLiteAdjustmentWriter.write | def write(self,
splits=None,
mergers=None,
dividends=None,
stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.adjustments.SQLiteAdjustmentReader
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
# Use IF NOT EXISTS here to allow multiple writes if desired.
self.conn.execute(
"CREATE INDEX IF NOT EXISTS splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
) | python | def write(self,
splits=None,
mergers=None,
dividends=None,
stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.adjustments.SQLiteAdjustmentReader
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
# Use IF NOT EXISTS here to allow multiple writes if desired.
self.conn.execute(
"CREATE INDEX IF NOT EXISTS splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
) | [
"def",
"write",
"(",
"self",
",",
"splits",
"=",
"None",
",",
"mergers",
"=",
"None",
",",
"dividends",
"=",
"None",
",",
"stock_dividends",
"=",
"None",
")",
":",
"self",
".",
"write_frame",
"(",
"'splits'",
",",
"splits",
")",
"self",
".",
"write_frame",
"(",
"'mergers'",
",",
"mergers",
")",
"self",
".",
"write_dividend_data",
"(",
"dividends",
",",
"stock_dividends",
")",
"# Use IF NOT EXISTS here to allow multiple writes if desired.",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS splits_sids \"",
"\"ON splits(sid)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS splits_effective_date \"",
"\"ON splits(effective_date)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS mergers_sids \"",
"\"ON mergers(sid)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS mergers_effective_date \"",
"\"ON mergers(effective_date)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS dividends_sid \"",
"\"ON dividends(sid)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS dividends_effective_date \"",
"\"ON dividends(effective_date)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS dividend_payouts_sid \"",
"\"ON dividend_payouts(sid)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date \"",
"\"ON dividend_payouts(ex_date)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid \"",
"\"ON stock_dividend_payouts(sid)\"",
")",
"self",
".",
"conn",
".",
"execute",
"(",
"\"CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date \"",
"\"ON stock_dividend_payouts(ex_date)\"",
")"
] | Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.adjustments.SQLiteAdjustmentReader | [
"Writes",
"data",
"to",
"a",
"SQLite",
"file",
"to",
"be",
"read",
"by",
"SQLiteAdjustmentReader",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/adjustments.py#L583-L703 |
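An end-to-end sketch of the `write` API documented above; the writer construction is elided because its arguments depend on the surrounding ingestion setup.

```python
import pandas as pd

splits = pd.DataFrame({
    'sid': [1],
    # effective_date is seconds since the Unix epoch, per the docstring.
    'effective_date': [int(pd.Timestamp('2016-01-06').value) // 10**9],
    # A 2-for-1 split: earlier prices are halved, earlier volume doubled.
    'ratio': [0.5],
})
# writer.write(splits=splits, mergers=None, dividends=None)
```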
25,761 | quantopian/zipline | zipline/pipeline/mixins.py | CustomTermMixin.compute | def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError(
"{name} must define a compute method".format(
name=type(self).__name__
)
) | python | def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError(
"{name} must define a compute method".format(
name=type(self).__name__
)
) | [
"def",
"compute",
"(",
"self",
",",
"today",
",",
"assets",
",",
"out",
",",
"*",
"arrays",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"{name} must define a compute method\"",
".",
"format",
"(",
"name",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")"
] | Override this method with a function that writes a value into `out`. | [
"Override",
"this",
"method",
"with",
"a",
"function",
"that",
"writes",
"a",
"value",
"into",
"out",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/mixins.py#L143-L151 |
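A minimal subclass sketch of the override this mixin requires, via `CustomFactor`, which builds on it; `compute` writes into `out` in place, which is the contract the surrounding machinery relies on.

```python
import numpy as np
from zipline.pipeline import CustomFactor
from zipline.pipeline.data import USEquityPricing

class MeanClose(CustomFactor):
    inputs = [USEquityPricing.close]
    window_length = 5

    def compute(self, today, assets, out, close):
        # close has shape (window_length, n_assets); fill out in place.
        out[:] = np.nanmean(close, axis=0)
```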
25,762 | quantopian/zipline | zipline/pipeline/mixins.py | CustomTermMixin._compute | def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out | python | def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out | [
"def",
"_compute",
"(",
"self",
",",
"windows",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"format_inputs",
"=",
"self",
".",
"_format_inputs",
"compute",
"=",
"self",
".",
"compute",
"params",
"=",
"self",
".",
"params",
"ndim",
"=",
"self",
".",
"ndim",
"shape",
"=",
"(",
"len",
"(",
"mask",
")",
",",
"1",
")",
"if",
"ndim",
"==",
"1",
"else",
"mask",
".",
"shape",
"out",
"=",
"self",
".",
"_allocate_output",
"(",
"windows",
",",
"shape",
")",
"with",
"self",
".",
"ctx",
":",
"for",
"idx",
",",
"date",
"in",
"enumerate",
"(",
"dates",
")",
":",
"# Never apply a mask to 1D outputs.",
"out_mask",
"=",
"array",
"(",
"[",
"True",
"]",
")",
"if",
"ndim",
"==",
"1",
"else",
"mask",
"[",
"idx",
"]",
"# Mask our inputs as usual.",
"inputs_mask",
"=",
"mask",
"[",
"idx",
"]",
"masked_assets",
"=",
"assets",
"[",
"inputs_mask",
"]",
"out_row",
"=",
"out",
"[",
"idx",
"]",
"[",
"out_mask",
"]",
"inputs",
"=",
"format_inputs",
"(",
"windows",
",",
"inputs_mask",
")",
"compute",
"(",
"date",
",",
"masked_assets",
",",
"out_row",
",",
"*",
"inputs",
",",
"*",
"*",
"params",
")",
"out",
"[",
"idx",
"]",
"[",
"out_mask",
"]",
"=",
"out_row",
"return",
"out"
] | Call the user's `compute` function on each window with a pre-built
output array. | [
"Call",
"the",
"user",
"s",
"compute",
"function",
"on",
"each",
"window",
"with",
"a",
"pre",
"-",
"built",
"output",
"array",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/mixins.py#L193-L220 |
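The masking loop above reduces to: slice a scratch row through the mask, compute into it, write it back. A self-contained sketch with a stand-in computation:

```python
import numpy as np

mask = np.array([[True, False, True],
                 [True, True,  False]])
assets = np.array([10, 20, 30])
out = np.full(mask.shape, np.nan)

def compute(date_idx, masked_assets, out_row):
    out_row[:] = date_idx * 100 + masked_assets  # stand-in computation

for idx in range(len(mask)):
    row_mask = mask[idx]
    out_row = out[idx][row_mask]      # fancy indexing copies the slots
    compute(idx, assets[row_mask], out_row)
    out[idx][row_mask] = out_row      # so the result must be written back
print(out)
```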
25,763 | quantopian/zipline | zipline/pipeline/mixins.py | DownsampledMixin.compute_extra_rows | def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[:current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
# If we have choices, the last choice is the first date of the
# period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos) | python | def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[:current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
# If we have choices, the last choice is the first date of the
# period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos) | [
"def",
"compute_extra_rows",
"(",
"self",
",",
"all_dates",
",",
"start_date",
",",
"end_date",
",",
"min_extra_rows",
")",
":",
"try",
":",
"current_start_pos",
"=",
"all_dates",
".",
"get_loc",
"(",
"start_date",
")",
"-",
"min_extra_rows",
"if",
"current_start_pos",
"<",
"0",
":",
"raise",
"NoFurtherDataError",
".",
"from_lookback_window",
"(",
"initial_message",
"=",
"\"Insufficient data to compute Pipeline:\"",
",",
"first_date",
"=",
"all_dates",
"[",
"0",
"]",
",",
"lookback_start",
"=",
"start_date",
",",
"lookback_length",
"=",
"min_extra_rows",
",",
")",
"except",
"KeyError",
":",
"before",
",",
"after",
"=",
"nearest_unequal_elements",
"(",
"all_dates",
",",
"start_date",
")",
"raise",
"ValueError",
"(",
"\"Pipeline start_date {start_date} is not in calendar.\\n\"",
"\"Latest date before start_date is {before}.\\n\"",
"\"Earliest date after start_date is {after}.\"",
".",
"format",
"(",
"start_date",
"=",
"start_date",
",",
"before",
"=",
"before",
",",
"after",
"=",
"after",
",",
")",
")",
"# Our possible target dates are all the dates on or before the current",
"# starting position.",
"# TODO: Consider bounding this below by self.window_length",
"candidates",
"=",
"all_dates",
"[",
":",
"current_start_pos",
"+",
"1",
"]",
"# Choose the latest date in the candidates that is the start of a new",
"# period at our frequency.",
"choices",
"=",
"select_sampling_indices",
"(",
"candidates",
",",
"self",
".",
"_frequency",
")",
"# If we have choices, the last choice is the first date if the",
"# period containing current_start_date. Choose it.",
"new_start_date",
"=",
"candidates",
"[",
"choices",
"[",
"-",
"1",
"]",
"]",
"# Add the difference between the new and old start dates to get the",
"# number of rows for the new start_date.",
"new_start_pos",
"=",
"all_dates",
".",
"get_loc",
"(",
"new_start_date",
")",
"assert",
"new_start_pos",
"<=",
"current_start_pos",
",",
"\"Computed negative extra rows!\"",
"return",
"min_extra_rows",
"+",
"(",
"current_start_pos",
"-",
"new_start_pos",
")"
] | Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date. | [
"Ensure",
"that",
"min_extra_rows",
"pushes",
"us",
"back",
"to",
"a",
"computation",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/mixins.py#L370-L437 |
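A toy illustration of the row-count adjustment above, with Mondays standing in for `select_sampling_indices`; the numbers are made up.

```python
import numpy as np
import pandas as pd

all_dates = pd.date_range('2016-01-01', periods=10, freq='B')
current_start_pos = 5                 # get_loc(start_date) - min_extra_rows
candidates = all_dates[:current_start_pos + 1]

# Stand-in sampling rule: recompute on Mondays.
choices = np.flatnonzero(candidates.dayofweek == 0)
new_start_pos = all_dates.get_loc(candidates[choices[-1]])

extra = current_start_pos - new_start_pos  # rows added to land on a sample
print(extra)                               # 4 with these made-up inputs
```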
25,764 | quantopian/zipline | zipline/pipeline/mixins.py | DownsampledMixin._compute | def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], \
"Misaligned sampling dates in %s." % type(self).__name__
real_compute = self._wrapped_term._compute
# Inputs will contain different kinds of values depending on whether or
# not we're a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
# There are two things we care about doing with the input:
# 1. Preparing an input to be passed to our wrapped term.
# 2. Skipping an input if we're going to use an already-computed row.
# We perform these actions differently based on the expected kind of
# input, and we encapsulate these actions with closures so that we
# don't clutter the code below with lots of branching.
if self.windowed:
# If we're windowed, inputs are stateful AdjustedArrays. We don't
# need to do any preparation before forwarding to real_compute, but
# we need to call `next` on them if we want to skip an iteration.
def prepare_inputs():
return inputs
def skip_this_input():
for w in inputs:
next(w)
else:
# If we're not windowed, inputs are just ndarrays. We need to
# slice out a single row when forwarding to real_compute, but we
# don't need to do anything to skip an input.
def prepare_inputs():
# i is the loop iteration variable below.
return [a[[i]] for a in inputs]
def skip_this_input():
pass
results = []
samples = iter(to_sample)
next_sample = next(samples)
for i, compute_date in enumerate(dates):
if next_sample == compute_date:
results.append(
real_compute(
prepare_inputs(),
dates[i:i + 1],
assets,
mask[i:i + 1],
)
)
try:
next_sample = next(samples)
except StopIteration:
# No more samples to take. Set next_sample to NaT, which
# compares False with any other datetime.
next_sample = pd_NaT
else:
skip_this_input()
# Copy results from previous sample period.
results.append(results[-1])
# We should have exhausted our sample dates.
try:
next_sample = next(samples)
except StopIteration:
pass
else:
raise AssertionError("Unconsumed sample date: %s" % next_sample)
# Concatenate stored results.
return vstack(results) | python | def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], \
"Misaligned sampling dates in %s." % type(self).__name__
real_compute = self._wrapped_term._compute
# Inputs will contain different kinds of values depending on whether or
# not we're a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
# There are two things we care about doing with the input:
# 1. Preparing an input to be passed to our wrapped term.
# 2. Skipping an input if we're going to use an already-computed row.
# We perform these actions differently based on the expected kind of
# input, and we encapsulate these actions with closures so that we
# don't clutter the code below with lots of branching.
if self.windowed:
# If we're windowed, inputs are stateful AdjustedArrays. We don't
# need to do any preparation before forwarding to real_compute, but
# we need to call `next` on them if we want to skip an iteration.
def prepare_inputs():
return inputs
def skip_this_input():
for w in inputs:
next(w)
else:
# If we're not windowed, inputs are just ndarrays. We need to
# slice out a single row when forwarding to real_compute, but we
# don't need to do anything to skip an input.
def prepare_inputs():
# i is the loop iteration variable below.
return [a[[i]] for a in inputs]
def skip_this_input():
pass
results = []
samples = iter(to_sample)
next_sample = next(samples)
for i, compute_date in enumerate(dates):
if next_sample == compute_date:
results.append(
real_compute(
prepare_inputs(),
dates[i:i + 1],
assets,
mask[i:i + 1],
)
)
try:
next_sample = next(samples)
except StopIteration:
# No more samples to take. Set next_sample to NaT, which
# compares False with any other datetime.
next_sample = pd_NaT
else:
skip_this_input()
# Copy results from previous sample period.
results.append(results[-1])
# We should have exhausted our sample dates.
try:
next_sample = next(samples)
except StopIteration:
pass
else:
raise AssertionError("Unconsumed sample date: %s" % next_sample)
# Concatenate stored results.
return vstack(results) | [
"def",
"_compute",
"(",
"self",
",",
"inputs",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"to_sample",
"=",
"dates",
"[",
"select_sampling_indices",
"(",
"dates",
",",
"self",
".",
"_frequency",
")",
"]",
"assert",
"to_sample",
"[",
"0",
"]",
"==",
"dates",
"[",
"0",
"]",
",",
"\"Misaligned sampling dates in %s.\"",
"%",
"type",
"(",
"self",
")",
".",
"__name__",
"real_compute",
"=",
"self",
".",
"_wrapped_term",
".",
"_compute",
"# Inputs will contain different kinds of values depending on whether or",
"# not we're a windowed computation.",
"# If we're windowed, then `inputs` is a list of iterators of ndarrays.",
"# If we're not windowed, then `inputs` is just a list of ndarrays.",
"# There are two things we care about doing with the input:",
"# 1. Preparing an input to be passed to our wrapped term.",
"# 2. Skipping an input if we're going to use an already-computed row.",
"# We perform these actions differently based on the expected kind of",
"# input, and we encapsulate these actions with closures so that we",
"# don't clutter the code below with lots of branching.",
"if",
"self",
".",
"windowed",
":",
"# If we're windowed, inputs are stateful AdjustedArrays. We don't",
"# need to do any preparation before forwarding to real_compute, but",
"# we need to call `next` on them if we want to skip an iteration.",
"def",
"prepare_inputs",
"(",
")",
":",
"return",
"inputs",
"def",
"skip_this_input",
"(",
")",
":",
"for",
"w",
"in",
"inputs",
":",
"next",
"(",
"w",
")",
"else",
":",
"# If we're not windowed, inputs are just ndarrays. We need to",
"# slice out a single row when forwarding to real_compute, but we",
"# don't need to do anything to skip an input.",
"def",
"prepare_inputs",
"(",
")",
":",
"# i is the loop iteration variable below.",
"return",
"[",
"a",
"[",
"[",
"i",
"]",
"]",
"for",
"a",
"in",
"inputs",
"]",
"def",
"skip_this_input",
"(",
")",
":",
"pass",
"results",
"=",
"[",
"]",
"samples",
"=",
"iter",
"(",
"to_sample",
")",
"next_sample",
"=",
"next",
"(",
"samples",
")",
"for",
"i",
",",
"compute_date",
"in",
"enumerate",
"(",
"dates",
")",
":",
"if",
"next_sample",
"==",
"compute_date",
":",
"results",
".",
"append",
"(",
"real_compute",
"(",
"prepare_inputs",
"(",
")",
",",
"dates",
"[",
"i",
":",
"i",
"+",
"1",
"]",
",",
"assets",
",",
"mask",
"[",
"i",
":",
"i",
"+",
"1",
"]",
",",
")",
")",
"try",
":",
"next_sample",
"=",
"next",
"(",
"samples",
")",
"except",
"StopIteration",
":",
"# No more samples to take. Set next_sample to Nat, which",
"# compares False with any other datetime.",
"next_sample",
"=",
"pd_NaT",
"else",
":",
"skip_this_input",
"(",
")",
"# Copy results from previous sample period.",
"results",
".",
"append",
"(",
"results",
"[",
"-",
"1",
"]",
")",
"# We should have exhausted our sample dates.",
"try",
":",
"next_sample",
"=",
"next",
"(",
"samples",
")",
"except",
"StopIteration",
":",
"pass",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Unconsumed sample date: %s\"",
"%",
"next_sample",
")",
"# Concatenate stored results.",
"return",
"vstack",
"(",
"results",
")"
] | Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples. | [
"Compute",
"by",
"delegating",
"to",
"self",
".",
"_wrapped_term",
".",
"_compute",
"on",
"sample",
"dates",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/mixins.py#L439-L516 |
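The compute-on-sample-dates, forward-fill-elsewhere shape of the loop above, reduced to a standalone sketch:

```python
import numpy as np

n_dates = 6
sample_positions = {0, 3}   # stand-in for select_sampling_indices output

results = []
for i in range(n_dates):
    if i in sample_positions:
        results.append(np.array([[float(i * 10)]]))  # "real" computation
    else:
        results.append(results[-1])   # repeat the last computed row
print(np.vstack(results).ravel())     # [ 0.  0.  0. 30. 30. 30.]
```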
25,765 | quantopian/zipline | zipline/utils/preprocess.py | preprocess | def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator | python | def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator | [
"def",
"preprocess",
"(",
"*",
"_unused",
",",
"*",
"*",
"processors",
")",
":",
"if",
"_unused",
":",
"raise",
"TypeError",
"(",
"\"preprocess() doesn't accept positional arguments\"",
")",
"def",
"_decorator",
"(",
"f",
")",
":",
"args",
",",
"varargs",
",",
"varkw",
",",
"defaults",
"=",
"argspec",
"=",
"getargspec",
"(",
"f",
")",
"if",
"defaults",
"is",
"None",
":",
"defaults",
"=",
"(",
")",
"no_defaults",
"=",
"(",
"NO_DEFAULT",
",",
")",
"*",
"(",
"len",
"(",
"args",
")",
"-",
"len",
"(",
"defaults",
")",
")",
"args_defaults",
"=",
"list",
"(",
"zip",
"(",
"args",
",",
"no_defaults",
"+",
"defaults",
")",
")",
"if",
"varargs",
":",
"args_defaults",
".",
"append",
"(",
"(",
"varargs",
",",
"NO_DEFAULT",
")",
")",
"if",
"varkw",
":",
"args_defaults",
".",
"append",
"(",
"(",
"varkw",
",",
"NO_DEFAULT",
")",
")",
"argset",
"=",
"set",
"(",
"args",
")",
"|",
"{",
"varargs",
",",
"varkw",
"}",
"-",
"{",
"None",
"}",
"# Arguments can be declared as tuples in Python 2.",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"arg",
",",
"str",
")",
"for",
"arg",
"in",
"args",
")",
":",
"raise",
"TypeError",
"(",
"\"Can't validate functions using tuple unpacking: %s\"",
"%",
"(",
"argspec",
",",
")",
")",
"# Ensure that all processors map to valid names.",
"bad_names",
"=",
"viewkeys",
"(",
"processors",
")",
"-",
"argset",
"if",
"bad_names",
":",
"raise",
"TypeError",
"(",
"\"Got processors for unknown arguments: %s.\"",
"%",
"bad_names",
")",
"return",
"_build_preprocessed_function",
"(",
"f",
",",
"processors",
",",
"args_defaults",
",",
"varargs",
",",
"varkw",
",",
")",
"return",
"_decorator"
] | Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead. | [
"Decorator",
"that",
"applies",
"pre",
"-",
"processors",
"to",
"the",
"arguments",
"of",
"a",
"function",
"before",
"calling",
"the",
"function",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/preprocess.py#L35-L112 |
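Beyond the docstring example, a processor can use `func` and `argname` purely for error reporting while coercing the value; a short sketch, assuming the import path shown in this record:

```python
from zipline.utils.preprocess import preprocess

def _ensure_float(func, argname, arg):
    try:
        return float(arg)
    except (TypeError, ValueError):
        raise TypeError("%s() could not coerce '%s'=%r to float"
                        % (func.__name__, argname, arg))

@preprocess(rate=_ensure_float)
def discount(price, rate):
    return price * (1 - rate)

print(discount(100, "0.25"))  # 75.0 -- the string is coerced before the call
```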
25,766 | quantopian/zipline | zipline/utils/preprocess.py | call | def call(f):
"""
Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Examples
--------
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2
"""
@wraps(f)
def processor(func, argname, arg):
return f(arg)
return processor | python | def call(f):
"""
Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Examples
--------
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2
"""
@wraps(f)
def processor(func, argname, arg):
return f(arg)
return processor | [
"def",
"call",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"processor",
"(",
"func",
",",
"argname",
",",
"arg",
")",
":",
"return",
"f",
"(",
"arg",
")",
"return",
"processor"
] | Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Examples
--------
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2 | [
"Wrap",
"a",
"function",
"in",
"a",
"processor",
"that",
"calls",
"f",
"on",
"the",
"argument",
"before",
"passing",
"it",
"along",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/preprocess.py#L115-L139 |
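`call` pairs naturally with builtins; a short sketch, again assuming the import path of this record:

```python
from zipline.utils.preprocess import call, preprocess

@preprocess(symbols=call(tuple))
def order_all(symbols):
    return symbols

print(order_all(['AAPL', 'MSFT']))  # ('AAPL', 'MSFT')
```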
25,767 | quantopian/zipline | zipline/utils/preprocess.py | _build_preprocessed_function | def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func`.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func | python | def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func`.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func | [
"def",
"_build_preprocessed_function",
"(",
"func",
",",
"processors",
",",
"args_defaults",
",",
"varargs",
",",
"varkw",
")",
":",
"format_kwargs",
"=",
"{",
"'func_name'",
":",
"func",
".",
"__name__",
"}",
"def",
"mangle",
"(",
"name",
")",
":",
"return",
"'a'",
"+",
"uuid4",
"(",
")",
".",
"hex",
"+",
"name",
"format_kwargs",
"[",
"'mangled_func'",
"]",
"=",
"mangled_funcname",
"=",
"mangle",
"(",
"func",
".",
"__name__",
")",
"def",
"make_processor_assignment",
"(",
"arg",
",",
"processor_name",
")",
":",
"template",
"=",
"\"{arg} = {processor}({func}, '{arg}', {arg})\"",
"return",
"template",
".",
"format",
"(",
"arg",
"=",
"arg",
",",
"processor",
"=",
"processor_name",
",",
"func",
"=",
"mangled_funcname",
",",
")",
"exec_globals",
"=",
"{",
"mangled_funcname",
":",
"func",
",",
"'wraps'",
":",
"wraps",
"}",
"defaults_seen",
"=",
"0",
"default_name_template",
"=",
"'a'",
"+",
"uuid4",
"(",
")",
".",
"hex",
"+",
"'_%d'",
"signature",
"=",
"[",
"]",
"call_args",
"=",
"[",
"]",
"assignments",
"=",
"[",
"]",
"star_map",
"=",
"{",
"varargs",
":",
"'*'",
",",
"varkw",
":",
"'**'",
",",
"}",
"def",
"name_as_arg",
"(",
"arg",
")",
":",
"return",
"star_map",
".",
"get",
"(",
"arg",
",",
"''",
")",
"+",
"arg",
"for",
"arg",
",",
"default",
"in",
"args_defaults",
":",
"if",
"default",
"is",
"NO_DEFAULT",
":",
"signature",
".",
"append",
"(",
"name_as_arg",
"(",
"arg",
")",
")",
"else",
":",
"default_name",
"=",
"default_name_template",
"%",
"defaults_seen",
"exec_globals",
"[",
"default_name",
"]",
"=",
"default",
"signature",
".",
"append",
"(",
"'='",
".",
"join",
"(",
"[",
"name_as_arg",
"(",
"arg",
")",
",",
"default_name",
"]",
")",
")",
"defaults_seen",
"+=",
"1",
"if",
"arg",
"in",
"processors",
":",
"procname",
"=",
"mangle",
"(",
"'_processor_'",
"+",
"arg",
")",
"exec_globals",
"[",
"procname",
"]",
"=",
"processors",
"[",
"arg",
"]",
"assignments",
".",
"append",
"(",
"make_processor_assignment",
"(",
"arg",
",",
"procname",
")",
")",
"call_args",
".",
"append",
"(",
"name_as_arg",
"(",
"arg",
")",
")",
"exec_str",
"=",
"dedent",
"(",
"\"\"\"\\\n @wraps({wrapped_funcname})\n def {func_name}({signature}):\n {assignments}\n return {wrapped_funcname}({call_args})\n \"\"\"",
")",
".",
"format",
"(",
"func_name",
"=",
"func",
".",
"__name__",
",",
"signature",
"=",
"', '",
".",
"join",
"(",
"signature",
")",
",",
"assignments",
"=",
"'\\n '",
".",
"join",
"(",
"assignments",
")",
",",
"wrapped_funcname",
"=",
"mangled_funcname",
",",
"call_args",
"=",
"', '",
".",
"join",
"(",
"call_args",
")",
",",
")",
"compiled",
"=",
"compile",
"(",
"exec_str",
",",
"func",
".",
"__code__",
".",
"co_filename",
",",
"mode",
"=",
"'exec'",
",",
")",
"exec_locals",
"=",
"{",
"}",
"exec_",
"(",
"compiled",
",",
"exec_globals",
",",
"exec_locals",
")",
"new_func",
"=",
"exec_locals",
"[",
"func",
".",
"__name__",
"]",
"code",
"=",
"new_func",
".",
"__code__",
"args",
"=",
"{",
"attr",
":",
"getattr",
"(",
"code",
",",
"attr",
")",
"for",
"attr",
"in",
"dir",
"(",
"code",
")",
"if",
"attr",
".",
"startswith",
"(",
"'co_'",
")",
"}",
"# Copy the firstlineno out of the underlying function so that exceptions",
"# get raised with the correct traceback.",
"# This also makes dynamic source inspection (like IPython `??` operator)",
"# work as intended.",
"try",
":",
"# Try to get the pycode object from the underlying function.",
"original_code",
"=",
"func",
".",
"__code__",
"except",
"AttributeError",
":",
"try",
":",
"# The underlying callable was not a function, try to grab the",
"# `__func__.__code__` which exists on method objects.",
"original_code",
"=",
"func",
".",
"__func__",
".",
"__code__",
"except",
"AttributeError",
":",
"# The underlying callable does not have a `__code__`. There is",
"# nothing for us to correct.",
"return",
"new_func",
"args",
"[",
"'co_firstlineno'",
"]",
"=",
"original_code",
".",
"co_firstlineno",
"new_func",
".",
"__code__",
"=",
"CodeType",
"(",
"*",
"map",
"(",
"getitem",
"(",
"args",
")",
",",
"_code_argorder",
")",
")",
"return",
"new_func"
] | Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func`. | [
"Build",
"a",
"preprocessed",
"function",
"with",
"the",
"same",
"signature",
"as",
"func",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/preprocess.py#L142-L247 |
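
A minimal sketch of the exec-based wrapping technique this row documents, independent of zipline's preprocess machinery (every name below is illustrative, not a zipline API): build wrapper source whose parameter list names the real argument, exec it, and return the generated function.

```python
from functools import wraps
from textwrap import dedent


def to_int(func, argname, value):
    # Example processor: coerce one argument before the wrapped call.
    return int(value)


def wrap_with_processor(func, argname, processor):
    # Generate wrapper source that names the argument explicitly, so
    # help()/inspect still see a real signature rather than *args.
    src = dedent("""\
        @wraps(_wrapped)
        def {name}({arg}):
            {arg} = _proc(_wrapped, '{arg}', {arg})
            return _wrapped({arg})
    """).format(name=func.__name__, arg=argname)
    namespace = {'_wrapped': func, '_proc': processor, 'wraps': wraps}
    exec(compile(src, '<generated>', 'exec'), namespace)
    return namespace[func.__name__]


def double(x):
    return 2 * x


doubled = wrap_with_processor(double, 'x', to_int)
print(doubled('21'))  # 42 -- '21' is coerced to int before the call
```
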
25,768 | quantopian/zipline | zipline/data/benchmarks.py | get_benchmark_returns | def get_benchmark_returns(symbol):
"""
Get a Series of benchmark returns from IEX associated with `symbol`.
Default is `SPY`.
Parameters
----------
symbol : str
Benchmark symbol for which we're getting the returns.
The data is provided by IEX (https://iextrading.com/), and we can
get up to 5 years worth of data.
"""
r = requests.get(
'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol)
)
data = r.json()
df = pd.DataFrame(data)
df.index = pd.DatetimeIndex(df['date'])
df = df['close']
return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:] | python | def get_benchmark_returns(symbol):
"""
Get a Series of benchmark returns from IEX associated with `symbol`.
Default is `SPY`.
Parameters
----------
symbol : str
Benchmark symbol for which we're getting the returns.
The data is provided by IEX (https://iextrading.com/), and we can
get up to 5 years worth of data.
"""
r = requests.get(
'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol)
)
data = r.json()
df = pd.DataFrame(data)
df.index = pd.DatetimeIndex(df['date'])
df = df['close']
return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:] | [
"def",
"get_benchmark_returns",
"(",
"symbol",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"'https://api.iextrading.com/1.0/stock/{}/chart/5y'",
".",
"format",
"(",
"symbol",
")",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"df",
".",
"index",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"df",
"[",
"'date'",
"]",
")",
"df",
"=",
"df",
"[",
"'close'",
"]",
"return",
"df",
".",
"sort_index",
"(",
")",
".",
"tz_localize",
"(",
"'UTC'",
")",
".",
"pct_change",
"(",
"1",
")",
".",
"iloc",
"[",
"1",
":",
"]"
] | Get a Series of benchmark returns from IEX associated with `symbol`.
Default is `SPY`.
Parameters
----------
symbol : str
Benchmark symbol for which we're getting the returns.
The data is provided by IEX (https://iextrading.com/), and we can
get up to 5 years worth of data. | [
"Get",
"a",
"Series",
"of",
"benchmark",
"returns",
"from",
"IEX",
"associated",
"with",
"symbol",
".",
"Default",
"is",
"SPY",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/benchmarks.py#L19-L42 |
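
Hypothetical usage of the loader above; note the unauthenticated iextrading.com endpoint it depends on has since been retired, so this sketch needs network access and an era-appropriate zipline install.

```python
from zipline.data.benchmarks import get_benchmark_returns

# Daily percent-change Series with a UTC-localized DatetimeIndex.
returns = get_benchmark_returns('SPY')
print(returns.tail())
```
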
25,769 | quantopian/zipline | zipline/pipeline/visualize.py | delimit | def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]]) | python | def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]]) | [
"def",
"delimit",
"(",
"delimiters",
",",
"content",
")",
":",
"if",
"len",
"(",
"delimiters",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"`delimiters` must be of length 2. Got %r\"",
"%",
"delimiters",
")",
"return",
"''",
".",
"join",
"(",
"[",
"delimiters",
"[",
"0",
"]",
",",
"content",
",",
"delimiters",
"[",
"1",
"]",
"]",
")"
] | Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"' | [
"Surround",
"content",
"with",
"the",
"first",
"and",
"last",
"characters",
"of",
"delimiters",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L24-L37 |
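
The doctests above are skip-marked; this standalone sketch (a local reimplementation, not an import from zipline) exercises the same behavior.

```python
def delimit(delimiters, content):
    # Surround content with the first and last characters of delimiters.
    if len(delimiters) != 2:
        raise ValueError(
            "`delimiters` must be of length 2. Got %r" % delimiters
        )
    return ''.join([delimiters[0], content, delimiters[1]])


assert delimit('[]', 'foo') == '[foo]'
assert delimit('""', 'foo') == '"foo"'
```
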
25,770 | quantopian/zipline | zipline/pipeline/visualize.py | roots | def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0) | python | def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0) | [
"def",
"roots",
"(",
"g",
")",
":",
"return",
"set",
"(",
"n",
"for",
"n",
",",
"d",
"in",
"iteritems",
"(",
"g",
".",
"in_degree",
"(",
")",
")",
"if",
"d",
"==",
"0",
")"
] | Get nodes from graph G with indegree 0 | [
"Get",
"nodes",
"from",
"graph",
"G",
"with",
"indegree",
"0"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L73-L75 |
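
What `roots` computes, shown with networkx directly. The `iteritems(g.in_degree())` call above assumes networkx 1.x, where `in_degree()` returns a dict; on networkx 2.x it returns a view of (node, degree) pairs, as used here.

```python
import networkx as nx

g = nx.DiGraph([('a', 'b'), ('a', 'c'), ('b', 'd')])
# Nodes with no incoming edges are the roots of the DAG.
root_nodes = {n for n, d in g.in_degree() if d == 0}
print(root_nodes)  # {'a'}
```
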
25,771 | quantopian/zipline | zipline/pipeline/visualize.py | _render | def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout) | python | def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout) | [
"def",
"_render",
"(",
"g",
",",
"out",
",",
"format_",
",",
"include_asset_exists",
"=",
"False",
")",
":",
"graph_attrs",
"=",
"{",
"'rankdir'",
":",
"'TB'",
",",
"'splines'",
":",
"'ortho'",
"}",
"cluster_attrs",
"=",
"{",
"'style'",
":",
"'filled'",
",",
"'color'",
":",
"'lightgoldenrod1'",
"}",
"in_nodes",
"=",
"g",
".",
"loadable_terms",
"out_nodes",
"=",
"list",
"(",
"g",
".",
"outputs",
".",
"values",
"(",
")",
")",
"f",
"=",
"BytesIO",
"(",
")",
"with",
"graph",
"(",
"f",
",",
"\"G\"",
",",
"*",
"*",
"graph_attrs",
")",
":",
"# Write outputs cluster.",
"with",
"cluster",
"(",
"f",
",",
"'Output'",
",",
"labelloc",
"=",
"'b'",
",",
"*",
"*",
"cluster_attrs",
")",
":",
"for",
"term",
"in",
"filter_nodes",
"(",
"include_asset_exists",
",",
"out_nodes",
")",
":",
"add_term_node",
"(",
"f",
",",
"term",
")",
"# Write inputs cluster.",
"with",
"cluster",
"(",
"f",
",",
"'Input'",
",",
"*",
"*",
"cluster_attrs",
")",
":",
"for",
"term",
"in",
"filter_nodes",
"(",
"include_asset_exists",
",",
"in_nodes",
")",
":",
"add_term_node",
"(",
"f",
",",
"term",
")",
"# Write intermediate results.",
"for",
"term",
"in",
"filter_nodes",
"(",
"include_asset_exists",
",",
"topological_sort",
"(",
"g",
".",
"graph",
")",
")",
":",
"if",
"term",
"in",
"in_nodes",
"or",
"term",
"in",
"out_nodes",
":",
"continue",
"add_term_node",
"(",
"f",
",",
"term",
")",
"# Write edges",
"for",
"source",
",",
"dest",
"in",
"g",
".",
"graph",
".",
"edges",
"(",
")",
":",
"if",
"source",
"is",
"AssetExists",
"(",
")",
"and",
"not",
"include_asset_exists",
":",
"continue",
"add_edge",
"(",
"f",
",",
"id",
"(",
"source",
")",
",",
"id",
"(",
"dest",
")",
")",
"cmd",
"=",
"[",
"'dot'",
",",
"'-T'",
",",
"format_",
"]",
"try",
":",
"proc",
"=",
"Popen",
"(",
"cmd",
",",
"stdin",
"=",
"PIPE",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"raise",
"RuntimeError",
"(",
"\"Couldn't find `dot` graph layout program. \"",
"\"Make sure Graphviz is installed and `dot` is on your path.\"",
")",
"else",
":",
"raise",
"f",
".",
"seek",
"(",
"0",
")",
"proc_stdout",
",",
"proc_stderr",
"=",
"proc",
".",
"communicate",
"(",
"f",
".",
"read",
"(",
")",
")",
"if",
"proc_stderr",
":",
"raise",
"RuntimeError",
"(",
"\"Error(s) while rendering graph: %s\"",
"%",
"proc_stderr",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"out",
".",
"write",
"(",
"proc_stdout",
")"
] | Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph. | [
"Draw",
"g",
"as",
"a",
"graph",
"to",
"out",
"in",
"format",
"format",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L84-L149 |
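
The subprocess pattern `_render` relies on, in isolation: pipe DOT source into the Graphviz `dot` program and capture the rendered bytes. Requires Graphviz on PATH; the DOT source here is a stand-in for the graph description `_render` actually builds.

```python
from subprocess import PIPE, Popen

dot_source = b'digraph G { "Input" -> "Factor" -> "Output"; }'
proc = Popen(['dot', '-T', 'svg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
svg_bytes, err = proc.communicate(dot_source)
if err:
    raise RuntimeError(err.decode('utf-8'))
with open('pipeline.svg', 'wb') as f:
    f.write(svg_bytes)
```
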
25,772 | quantopian/zipline | zipline/pipeline/visualize.py | display_graph | def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue()) | python | def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue()) | [
"def",
"display_graph",
"(",
"g",
",",
"format",
"=",
"'svg'",
",",
"include_asset_exists",
"=",
"False",
")",
":",
"try",
":",
"import",
"IPython",
".",
"display",
"as",
"display",
"except",
"ImportError",
":",
"raise",
"NoIPython",
"(",
"\"IPython is not installed. Can't display graph.\"",
")",
"if",
"format",
"==",
"'svg'",
":",
"display_cls",
"=",
"display",
".",
"SVG",
"elif",
"format",
"in",
"(",
"\"jpeg\"",
",",
"\"png\"",
")",
":",
"display_cls",
"=",
"partial",
"(",
"display",
".",
"Image",
",",
"format",
"=",
"format",
",",
"embed",
"=",
"True",
")",
"out",
"=",
"BytesIO",
"(",
")",
"_render",
"(",
"g",
",",
"out",
",",
"format",
",",
"include_asset_exists",
"=",
"include_asset_exists",
")",
"return",
"display_cls",
"(",
"data",
"=",
"out",
".",
"getvalue",
"(",
")",
")"
] | Display a TermGraph interactively from within IPython. | [
"Display",
"a",
"TermGraph",
"interactively",
"from",
"within",
"IPython",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L152-L168 |
25,773 | quantopian/zipline | zipline/pipeline/visualize.py | format_attrs | def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
return '[' + ', '.join(entries) + ']' | python | def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
return '[' + ', '.join(entries) + ']' | [
"def",
"format_attrs",
"(",
"attrs",
")",
":",
"if",
"not",
"attrs",
":",
"return",
"''",
"entries",
"=",
"[",
"'='",
".",
"join",
"(",
"(",
"key",
",",
"value",
")",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"attrs",
")",
"]",
"return",
"'['",
"+",
"', '",
".",
"join",
"(",
"entries",
")",
"+",
"']'"
] | Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]' | [
"Format",
"key",
"value",
"pairs",
"from",
"attrs",
"into",
"graphviz",
"attrs",
"format"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L215-L227 |
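
A standalone check of the attribute-formatting behavior documented above (local reimplementation; entries are sorted here only to make the output deterministic).

```python
def format_attrs(attrs):
    if not attrs:
        return ''
    entries = ['='.join((key, value)) for key, value in sorted(attrs.items())]
    return '[' + ', '.join(entries) + ']'


print(format_attrs({'color': 'blue', 'shape': 'box'}))  # [color=blue, shape=box]
print(repr(format_attrs({})))                           # ''
```
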
25,774 | quantopian/zipline | zipline/utils/pool.py | SequentialPool.apply_async | def apply_async(f, args=(), kwargs=None, callback=None):
"""Apply a function but emulate the API of an asynchronous call.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
kwargs : dict, optional
The keyword arguments.
Returns
-------
future : ApplyAsyncResult
The result of calling the function boxed in a future-like api.
Notes
-----
This calls the function eagerly but wraps it so that ``SequentialPool``
can be used where a :class:`multiprocessing.Pool` or
:class:`gevent.pool.Pool` would be used.
"""
try:
value = (identity if callback is None else callback)(
f(*args, **kwargs or {}),
)
successful = True
except Exception as e:
value = e
successful = False
return ApplyAsyncResult(value, successful) | python | def apply_async(f, args=(), kwargs=None, callback=None):
"""Apply a function but emulate the API of an asynchronous call.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
kwargs : dict, optional
The keyword arguments.
Returns
-------
future : ApplyAsyncResult
The result of calling the function boxed in a future-like api.
Notes
-----
This calls the function eagerly but wraps it so that ``SequentialPool``
can be used where a :class:`multiprocessing.Pool` or
:class:`gevent.pool.Pool` would be used.
"""
try:
value = (identity if callback is None else callback)(
f(*args, **kwargs or {}),
)
successful = True
except Exception as e:
value = e
successful = False
return ApplyAsyncResult(value, successful) | [
"def",
"apply_async",
"(",
"f",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"try",
":",
"value",
"=",
"(",
"identity",
"if",
"callback",
"is",
"None",
"else",
"callback",
")",
"(",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
"or",
"{",
"}",
")",
",",
")",
"successful",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"value",
"=",
"e",
"successful",
"=",
"False",
"return",
"ApplyAsyncResult",
"(",
"value",
",",
"successful",
")"
] | Apply a function but emulate the API of an asynchronous call.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
kwargs : dict, optional
The keyword arguments.
Returns
-------
future : ApplyAsyncResult
The result of calling the function boxed in a future-like api.
Notes
-----
This calls the function eagerly but wraps it so that ``SequentialPool``
can be used where a :class:`multiprocessing.Pool` or
:class:`gevent.pool.Pool` would be used. | [
"Apply",
"a",
"function",
"but",
"emulate",
"the",
"API",
"of",
"an",
"asynchronous",
"call",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pool.py#L84-L116 |
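
A minimal sketch of the eager `apply_async` contract described above: call the function immediately, but box the outcome in a future-like result. `ApplyAsyncResult` here is a stand-in written for this example, not an import of zipline's class.

```python
class ApplyAsyncResult(object):
    def __init__(self, value, successful):
        self._value = value
        self._successful = successful

    def successful(self):
        return self._successful

    def get(self):
        # Mirror multiprocessing's AsyncResult: re-raise on failure.
        if not self._successful:
            raise self._value
        return self._value


def apply_async(f, args=(), kwargs=None):
    try:
        return ApplyAsyncResult(f(*args, **(kwargs or {})), True)
    except Exception as e:
        return ApplyAsyncResult(e, False)


res = apply_async(lambda a, b: a + b, (1, 2))
print(res.successful(), res.get())  # True 3
```
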
25,775 | quantopian/zipline | zipline/utils/cli.py | maybe_show_progress | def maybe_show_progress(it, show_progress, **kwargs):
"""Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
...
"""
if show_progress:
return click.progressbar(it, **kwargs)
# context manager that just returns `it` when we enter it
return CallbackManager(lambda it=it: it) | python | def maybe_show_progress(it, show_progress, **kwargs):
"""Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
...
"""
if show_progress:
return click.progressbar(it, **kwargs)
# context manager that just returns `it` when we enter it
return CallbackManager(lambda it=it: it) | [
"def",
"maybe_show_progress",
"(",
"it",
",",
"show_progress",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"show_progress",
":",
"return",
"click",
".",
"progressbar",
"(",
"it",
",",
"*",
"*",
"kwargs",
")",
"# context manager that just return `it` when we enter it",
"return",
"CallbackManager",
"(",
"lambda",
"it",
"=",
"it",
":",
"it",
")"
] | Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
... | [
"Optionally",
"show",
"a",
"progress",
"bar",
"for",
"the",
"given",
"iterator",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cli.py#L7-L36 |
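
Usage sketch for the helper above, assuming this version of zipline is importable: the same loop body works whether or not a click progress bar is drawn, because both branches are context managers yielding an iterable.

```python
from zipline.utils.cli import maybe_show_progress

items = range(5)
with maybe_show_progress(items, show_progress=False, label='processing') as it:
    total = sum(it)
print(total)  # 10
```
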
25,776 | quantopian/zipline | zipline/__main__.py | main | def main(extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
) | python | def main(extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
) | [
"def",
"main",
"(",
"extension",
",",
"strict_extensions",
",",
"default_extension",
",",
"x",
")",
":",
"# install a logbook handler before performing any other operations",
"logbook",
".",
"StderrHandler",
"(",
")",
".",
"push_application",
"(",
")",
"create_args",
"(",
"x",
",",
"zipline",
".",
"extension_args",
")",
"load_extensions",
"(",
"default_extension",
",",
"extension",
",",
"strict_extensions",
",",
"os",
".",
"environ",
",",
")"
] | Top level zipline entry point. | [
"Top",
"level",
"zipline",
"entry",
"point",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L49-L61 |
25,777 | quantopian/zipline | zipline/__main__.py | ipython_only | def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d | python | def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d | [
"def",
"ipython_only",
"(",
"option",
")",
":",
"if",
"__IPYTHON__",
":",
"return",
"option",
"argname",
"=",
"extract_option_object",
"(",
"option",
")",
".",
"name",
"def",
"d",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"_",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"argname",
"]",
"=",
"None",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_",
"return",
"d"
] | Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode. | [
"Mark",
"that",
"an",
"option",
"should",
"only",
"be",
"exposed",
"in",
"IPython",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L84-L109 |
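
The fallback path above, in isolation: outside IPython, the click option is swapped for a decorator that injects `argname=None`, keeping the command function's signature uniform. All names below are illustrative.

```python
from functools import wraps


def stub_option(argname):
    def d(f):
        @wraps(f)
        def _(*args, **kwargs):
            # Supply the IPython-only argument with a None default.
            kwargs[argname] = None
            return f(*args, **kwargs)
        return _
    return d


@stub_option('local_namespace')
def run_command(local_namespace):
    return local_namespace


print(run_command())  # None
```
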
25,778 | quantopian/zipline | zipline/__main__.py | zipline_magic | def zipline_magic(line, cell=None):
"""The zipline IPython cell magic.
"""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
'--algotext', cell,
'--output', os.devnull, # don't write the results by default
] + ([
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
'--algotext', '',
'--local-namespace',
] if cell is None else []) + line.split(),
'%s%%zipline' % ((cell or '') and '%'),
# don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError('main returned non-zero status code: %d' % e.code) | python | def zipline_magic(line, cell=None):
"""The zipline IPython cell magic.
"""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
'--algotext', cell,
'--output', os.devnull, # don't write the results by default
] + ([
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
'--algotext', '',
'--local-namespace',
] if cell is None else []) + line.split(),
'%s%%zipline' % ((cell or '') and '%'),
# don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError('main returned non-zero status code: %d' % e.code) | [
"def",
"zipline_magic",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"load_extensions",
"(",
"default",
"=",
"True",
",",
"extensions",
"=",
"[",
"]",
",",
"strict",
"=",
"True",
",",
"environ",
"=",
"os",
".",
"environ",
",",
")",
"try",
":",
"return",
"run",
".",
"main",
"(",
"# put our overrides at the start of the parameter list so that",
"# users may pass values with higher precedence",
"[",
"'--algotext'",
",",
"cell",
",",
"'--output'",
",",
"os",
".",
"devnull",
",",
"# don't write the results by default",
"]",
"+",
"(",
"[",
"# these options are set when running in line magic mode",
"# set a non None algo text to use the ipython user_ns",
"'--algotext'",
",",
"''",
",",
"'--local-namespace'",
",",
"]",
"if",
"cell",
"is",
"None",
"else",
"[",
"]",
")",
"+",
"line",
".",
"split",
"(",
")",
",",
"'%s%%zipline'",
"%",
"(",
"(",
"cell",
"or",
"''",
")",
"and",
"'%'",
")",
",",
"# don't use system exit and propogate errors to the caller",
"standalone_mode",
"=",
"False",
",",
")",
"except",
"SystemExit",
"as",
"e",
":",
"# https://github.com/mitsuhiko/click/pull/533",
"# even in standalone_mode=False `--help` really wants to kill us ;_;",
"if",
"e",
".",
"code",
":",
"raise",
"ValueError",
"(",
"'main returned non-zero status code: %d'",
"%",
"e",
".",
"code",
")"
] | The zipline IPython cell magic. | [
"The",
"zipline",
"IPython",
"cell",
"magic",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L287-L317 |
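
How the magic above is typically invoked from a notebook cell once `%load_ext zipline` has registered it. The dates and bundle name are illustrative, and this is IPython cell syntax, not importable Python.

```python
%%zipline --start 2016-1-1 --end 2017-1-1 --bundle quandl

from zipline.api import order, symbol


def initialize(context):
    pass


def handle_data(context, data):
    order(symbol('AAPL'), 10)
```
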
25,779 | quantopian/zipline | zipline/__main__.py | ingest | def ingest(bundle, assets_version, show_progress):
"""Ingest the data for the given bundle.
"""
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
) | python | def ingest(bundle, assets_version, show_progress):
"""Ingest the data for the given bundle.
"""
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
) | [
"def",
"ingest",
"(",
"bundle",
",",
"assets_version",
",",
"show_progress",
")",
":",
"bundles_module",
".",
"ingest",
"(",
"bundle",
",",
"os",
".",
"environ",
",",
"pd",
".",
"Timestamp",
".",
"utcnow",
"(",
")",
",",
"assets_version",
",",
"show_progress",
",",
")"
] | Ingest the data for the given bundle. | [
"Ingest",
"the",
"data",
"for",
"the",
"given",
"bundle",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L340-L349 |
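
A programmatic equivalent of `zipline ingest -b quandl`, calling the bundles module the command delegates to. The positional argument order follows the call in this row; treat the signature as an assumption if running against another zipline release.

```python
import os

import pandas as pd
from zipline.data import bundles as bundles_module

bundles_module.ingest(
    'quandl',              # a registered bundle name (assumed here)
    os.environ,
    pd.Timestamp.utcnow(),
    show_progress=True,
)
```
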
25,780 | quantopian/zipline | zipline/__main__.py | clean | def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
) | python | def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
) | [
"def",
"clean",
"(",
"bundle",
",",
"before",
",",
"after",
",",
"keep_last",
")",
":",
"bundles_module",
".",
"clean",
"(",
"bundle",
",",
"before",
",",
"after",
",",
"keep_last",
",",
")"
] | Clean up data downloaded with the ingest command. | [
"Clean",
"up",
"data",
"downloaded",
"with",
"the",
"ingest",
"command",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L383-L391 |
25,781 | quantopian/zipline | zipline/__main__.py | bundles | def bundles():
"""List all of the available data bundles.
"""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith('.'):
# hide the test data
continue
try:
ingestions = list(
map(text_type, bundles_module.ingestions_for_bundle(bundle))
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp)) | python | def bundles():
"""List all of the available data bundles.
"""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith('.'):
# hide the test data
continue
try:
ingestions = list(
map(text_type, bundles_module.ingestions_for_bundle(bundle))
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp)) | [
"def",
"bundles",
"(",
")",
":",
"for",
"bundle",
"in",
"sorted",
"(",
"bundles_module",
".",
"bundles",
".",
"keys",
"(",
")",
")",
":",
"if",
"bundle",
".",
"startswith",
"(",
"'.'",
")",
":",
"# hide the test data",
"continue",
"try",
":",
"ingestions",
"=",
"list",
"(",
"map",
"(",
"text_type",
",",
"bundles_module",
".",
"ingestions_for_bundle",
"(",
"bundle",
")",
")",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
":",
"raise",
"ingestions",
"=",
"[",
"]",
"# If we got no ingestions, either because the directory didn't exist or",
"# because there were no entries, print a single message indicating that",
"# no ingestions have yet been made.",
"for",
"timestamp",
"in",
"ingestions",
"or",
"[",
"\"<no ingestions>\"",
"]",
":",
"click",
".",
"echo",
"(",
"\"%s %s\"",
"%",
"(",
"bundle",
",",
"timestamp",
")",
")"
] | List all of the available data bundles. | [
"List",
"all",
"of",
"the",
"available",
"data",
"bundles",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L395-L415 |
25,782 | quantopian/zipline | zipline/pipeline/filters/filter.py | binary_operator | def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator | python | def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator | [
"def",
"binary_operator",
"(",
"op",
")",
":",
"# When combining a Filter with a NumericalExpression, we use this",
"# attrgetter instance to defer to the commuted interpretation of the",
"# NumericalExpression operator.",
"commuted_method_getter",
"=",
"attrgetter",
"(",
"method_name_for_op",
"(",
"op",
",",
"commute",
"=",
"True",
")",
")",
"def",
"binary_operator",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"NumericalExpression",
")",
":",
"self_expr",
",",
"other_expr",
",",
"new_inputs",
"=",
"self",
".",
"build_binary_op",
"(",
"op",
",",
"other",
",",
")",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"({left}) {op} ({right})\"",
".",
"format",
"(",
"left",
"=",
"self_expr",
",",
"op",
"=",
"op",
",",
"right",
"=",
"other_expr",
",",
")",
",",
"new_inputs",
",",
")",
"elif",
"isinstance",
"(",
"other",
",",
"NumericalExpression",
")",
":",
"# NumericalExpression overrides numerical ops to correctly handle",
"# merging of inputs. Look up and call the appropriate",
"# right-binding operator with ourself as the input.",
"return",
"commuted_method_getter",
"(",
"other",
")",
"(",
"self",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Term",
")",
":",
"if",
"other",
".",
"dtype",
"!=",
"bool_dtype",
":",
"raise",
"BadBinaryOperator",
"(",
"op",
",",
"self",
",",
"other",
")",
"if",
"self",
"is",
"other",
":",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"x_0 {op} x_0\"",
".",
"format",
"(",
"op",
"=",
"op",
")",
",",
"(",
"self",
",",
")",
",",
")",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"x_0 {op} x_1\"",
".",
"format",
"(",
"op",
"=",
"op",
")",
",",
"(",
"self",
",",
"other",
")",
",",
")",
"elif",
"isinstance",
"(",
"other",
",",
"int",
")",
":",
"# Note that this is true for bool as well",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"x_0 {op} {constant}\"",
".",
"format",
"(",
"op",
"=",
"op",
",",
"constant",
"=",
"int",
"(",
"other",
")",
")",
",",
"binds",
"=",
"(",
"self",
",",
")",
",",
")",
"raise",
"BadBinaryOperator",
"(",
"op",
",",
"self",
",",
"other",
")",
"binary_operator",
".",
"__doc__",
"=",
"\"Binary Operator: '%s'\"",
"%",
"op",
"return",
"binary_operator"
] | Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__. | [
"Factory",
"function",
"for",
"making",
"binary",
"operator",
"methods",
"on",
"a",
"Filter",
"subclass",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L62-L112 |
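
What a `NumExprFilter` evaluates at compute time, shown with numexpr directly: the factory above only builds expression strings such as "x_0 & x_1" over the terms' output arrays.

```python
import numexpr
import numpy as np

x_0 = np.array([True, True, False, False])
x_1 = np.array([True, False, True, False])
# numexpr resolves x_0/x_1 from the calling frame's locals by default.
print(numexpr.evaluate('x_0 & x_1'))  # [ True False False False]
```
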
25,783 | quantopian/zipline | zipline/pipeline/filters/filter.py | unary_operator | def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {'~'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator | python | def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {'~'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator | [
"def",
"unary_operator",
"(",
"op",
")",
":",
"valid_ops",
"=",
"{",
"'~'",
"}",
"if",
"op",
"not",
"in",
"valid_ops",
":",
"raise",
"ValueError",
"(",
"\"Invalid unary operator %s.\"",
"%",
"op",
")",
"def",
"unary_operator",
"(",
"self",
")",
":",
"# This can't be hoisted up a scope because the types returned by",
"# unary_op_return_type aren't defined when the top-level function is",
"# invoked.",
"if",
"isinstance",
"(",
"self",
",",
"NumericalExpression",
")",
":",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"{op}({expr})\"",
".",
"format",
"(",
"op",
"=",
"op",
",",
"expr",
"=",
"self",
".",
"_expr",
")",
",",
"self",
".",
"inputs",
",",
")",
"else",
":",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"{op}x_0\"",
".",
"format",
"(",
"op",
"=",
"op",
")",
",",
"(",
"self",
",",
")",
")",
"unary_operator",
".",
"__doc__",
"=",
"\"Unary Operator: '%s'\"",
"%",
"op",
"return",
"unary_operator"
] | Factory function for making unary operator methods for Filters. | [
"Factory",
"function",
"for",
"making",
"unary",
"operator",
"methods",
"for",
"Filters",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L115-L136 |
25,784 | quantopian/zipline | zipline/pipeline/filters/filter.py | NumExprFilter.create | def create(cls, expr, binds):
"""
Helper for creating new NumExprFilters.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype) | python | def create(cls, expr, binds):
"""
Helper for creating new NumExprFilters.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype) | [
"def",
"create",
"(",
"cls",
",",
"expr",
",",
"binds",
")",
":",
"return",
"cls",
"(",
"expr",
"=",
"expr",
",",
"binds",
"=",
"binds",
",",
"dtype",
"=",
"bool_dtype",
")"
] | Helper for creating new NumExprFilters.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype. | [
"Helper",
"for",
"creating",
"new",
"NumExprFactors",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L237-L245 |
25,785 | quantopian/zipline | zipline/pipeline/filters/filter.py | NumExprFilter._compute | def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask | python | def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask | [
"def",
"_compute",
"(",
"self",
",",
"arrays",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"return",
"super",
"(",
"NumExprFilter",
",",
"self",
")",
".",
"_compute",
"(",
"arrays",
",",
"dates",
",",
"assets",
",",
"mask",
",",
")",
"&",
"mask"
] | Compute our result with numexpr, then re-apply `mask`. | [
"Compute",
"our",
"result",
"with",
"numexpr",
"then",
"re",
"-",
"apply",
"mask",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L247-L256 |
25,786 | quantopian/zipline | zipline/pipeline/filters/filter.py | PercentileFilter._validate | def _validate(self):
"""
Ensure that our percentile bounds are well-formed.
"""
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
raise BadPercentileBounds(
min_percentile=self._min_percentile,
max_percentile=self._max_percentile,
upper_bound=100.0
)
return super(PercentileFilter, self)._validate() | python | def _validate(self):
"""
Ensure that our percentile bounds are well-formed.
"""
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
raise BadPercentileBounds(
min_percentile=self._min_percentile,
max_percentile=self._max_percentile,
upper_bound=100.0
)
return super(PercentileFilter, self)._validate() | [
"def",
"_validate",
"(",
"self",
")",
":",
"if",
"not",
"0.0",
"<=",
"self",
".",
"_min_percentile",
"<",
"self",
".",
"_max_percentile",
"<=",
"100.0",
":",
"raise",
"BadPercentileBounds",
"(",
"min_percentile",
"=",
"self",
".",
"_min_percentile",
",",
"max_percentile",
"=",
"self",
".",
"_max_percentile",
",",
"upper_bound",
"=",
"100.0",
")",
"return",
"super",
"(",
"PercentileFilter",
",",
"self",
")",
".",
"_validate",
"(",
")"
] | Ensure that our percentile bounds are well-formed. | [
"Ensure",
"that",
"our",
"percentile",
"bounds",
"are",
"well",
"-",
"formed",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L344-L354 |
25,787 | quantopian/zipline | zipline/pipeline/filters/filter.py | PercentileFilter._compute | def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a mask of all values falling between
the given percentiles.
"""
# TODO: Review whether there's a better way of handling small numbers
# of columns.
data = arrays[0].copy().astype(float64)
data[~mask] = nan
# FIXME: np.nanpercentile **should** support computing multiple bounds
# at once, but there's a bug in the logic for multiple bounds in numpy
# 1.9.2. It will be fixed in 1.10.
# c.f. https://github.com/numpy/numpy/pull/5981
lower_bounds = nanpercentile(
data,
self._min_percentile,
axis=1,
keepdims=True,
)
upper_bounds = nanpercentile(
data,
self._max_percentile,
axis=1,
keepdims=True,
)
return (lower_bounds <= data) & (data <= upper_bounds) | python | def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a mask of all values falling between
the given percentiles.
"""
# TODO: Review whether there's a better way of handling small numbers
# of columns.
data = arrays[0].copy().astype(float64)
data[~mask] = nan
# FIXME: np.nanpercentile **should** support computing multiple bounds
# at once, but there's a bug in the logic for multiple bounds in numpy
# 1.9.2. It will be fixed in 1.10.
# c.f. https://github.com/numpy/numpy/pull/5981
lower_bounds = nanpercentile(
data,
self._min_percentile,
axis=1,
keepdims=True,
)
upper_bounds = nanpercentile(
data,
self._max_percentile,
axis=1,
keepdims=True,
)
return (lower_bounds <= data) & (data <= upper_bounds) | [
"def",
"_compute",
"(",
"self",
",",
"arrays",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"# TODO: Review whether there's a better way of handling small numbers",
"# of columns.",
"data",
"=",
"arrays",
"[",
"0",
"]",
".",
"copy",
"(",
")",
".",
"astype",
"(",
"float64",
")",
"data",
"[",
"~",
"mask",
"]",
"=",
"nan",
"# FIXME: np.nanpercentile **should** support computing multiple bounds",
"# at once, but there's a bug in the logic for multiple bounds in numpy",
"# 1.9.2. It will be fixed in 1.10.",
"# c.f. https://github.com/numpy/numpy/pull/5981",
"lower_bounds",
"=",
"nanpercentile",
"(",
"data",
",",
"self",
".",
"_min_percentile",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
",",
")",
"upper_bounds",
"=",
"nanpercentile",
"(",
"data",
",",
"self",
".",
"_max_percentile",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
",",
")",
"return",
"(",
"lower_bounds",
"<=",
"data",
")",
"&",
"(",
"data",
"<=",
"upper_bounds",
")"
] | For each row in the input, compute a mask of all values falling between
the given percentiles. | [
"For",
"each",
"row",
"in",
"the",
"input",
"compute",
"a",
"mask",
"of",
"all",
"values",
"falling",
"between",
"the",
"given",
"percentiles",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L356-L382 |
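
The row-wise percentile mask above, in isolation with plain numpy: per-row bounds computed with `keepdims=True` broadcast back against the data, and NaN cells (the masked positions) compare False on both sides.

```python
import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, np.nan],
                 [10.0, 20.0, 30.0, 40.0, 50.0]])
lower = np.nanpercentile(data, 25, axis=1, keepdims=True)  # shape (2, 1)
upper = np.nanpercentile(data, 75, axis=1, keepdims=True)
mask = (lower <= data) & (data <= upper)
print(mask)
```
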
25,788 | quantopian/zipline | zipline/data/treasuries.py | parse_treasury_csv_column | def parse_treasury_csv_column(column):
"""
Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year.
"""
column_re = re.compile(
r"^(?P<prefix>RIFLGFC)"
"(?P<unit>[YM])"
"(?P<periods>[0-9]{2})"
"(?P<suffix>_N.B)$"
)
match = column_re.match(column)
if match is None:
raise ValueError("Couldn't parse CSV column %r." % column)
unit, periods = get_unit_and_periods(match.groupdict())
# Roundtrip through int to coerce '06' into '6'.
return str(int(periods)) + ('year' if unit == 'Y' else 'month') | python | def parse_treasury_csv_column(column):
"""
Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year.
"""
column_re = re.compile(
r"^(?P<prefix>RIFLGFC)"
"(?P<unit>[YM])"
"(?P<periods>[0-9]{2})"
"(?P<suffix>_N.B)$"
)
match = column_re.match(column)
if match is None:
raise ValueError("Couldn't parse CSV column %r." % column)
unit, periods = get_unit_and_periods(match.groupdict())
# Roundtrip through int to coerce '06' into '6'.
return str(int(periods)) + ('year' if unit == 'Y' else 'month') | [
"def",
"parse_treasury_csv_column",
"(",
"column",
")",
":",
"column_re",
"=",
"re",
".",
"compile",
"(",
"r\"^(?P<prefix>RIFLGFC)\"",
"\"(?P<unit>[YM])\"",
"\"(?P<periods>[0-9]{2})\"",
"\"(?P<suffix>_N.B)$\"",
")",
"match",
"=",
"column_re",
".",
"match",
"(",
"column",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Couldn't parse CSV column %r.\"",
"%",
"column",
")",
"unit",
",",
"periods",
"=",
"get_unit_and_periods",
"(",
"match",
".",
"groupdict",
"(",
")",
")",
"# Roundtrip through int to coerce '06' into '6'.",
"return",
"str",
"(",
"int",
"(",
"periods",
")",
")",
"+",
"(",
"'year'",
"if",
"unit",
"==",
"'Y'",
"else",
"'month'",
")"
] | Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year. | [
"Parse",
"a",
"treasury",
"CSV",
"column",
"into",
"a",
"more",
"human",
"-",
"readable",
"format",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries.py#L25-L47 |
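
The column regex above, exercised standalone; `get_unit_and_periods` (not shown in this row) just pulls the unit and periods fields out of the match's groupdict.

```python
import re

column_re = re.compile(
    r"^(?P<prefix>RIFLGFC)"
    r"(?P<unit>[YM])"
    r"(?P<periods>[0-9]{2})"
    r"(?P<suffix>_N.B)$"
)

d = column_re.match('RIFLGFCY10_N.B').groupdict()
print(str(int(d['periods'])) + ('year' if d['unit'] == 'Y' else 'month'))
# 10year
```
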
25,789 | quantopian/zipline | zipline/data/treasuries.py | get_daily_10yr_treasury_data | def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
squeeze=True) | python | def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
squeeze=True) | [
"def",
"get_daily_10yr_treasury_data",
"(",
")",
":",
"url",
"=",
"\"https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15\"",
"\"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=\"",
"\"&filetype=csv&label=include&layout=seriescolumn\"",
"return",
"pd",
".",
"read_csv",
"(",
"url",
",",
"header",
"=",
"5",
",",
"index_col",
"=",
"0",
",",
"names",
"=",
"[",
"'DATE'",
",",
"'BC_10YEAR'",
"]",
",",
"parse_dates",
"=",
"True",
",",
"converters",
"=",
"{",
"1",
":",
"dataconverter",
"}",
",",
"squeeze",
"=",
"True",
")"
] | Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series. | [
"Download",
"daily",
"10",
"year",
"treasury",
"rates",
"from",
"the",
"Federal",
"Reserve",
"and",
"return",
"a",
"pandas",
".",
"Series",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries.py#L93-L101 |
25,790 | quantopian/zipline | zipline/data/minute_bars.py | _sid_subdir_path | def _sid_subdir_path(sid):
"""
Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
) | python | def _sid_subdir_path(sid):
"""
Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
) | [
"def",
"_sid_subdir_path",
"(",
"sid",
")",
":",
"padded_sid",
"=",
"format",
"(",
"sid",
",",
"'06'",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"# subdir 1 00/XX",
"padded_sid",
"[",
"0",
":",
"2",
"]",
",",
"# subdir 2 XX/00",
"padded_sid",
"[",
"2",
":",
"4",
"]",
",",
"\"{0}.bcolz\"",
".",
"format",
"(",
"str",
"(",
"padded_sid",
")",
")",
")"
] | Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz | [
"Format",
"subdir",
"path",
"to",
"limit",
"the",
"number",
"directories",
"in",
"any",
"given",
"subdirectory",
"to",
"100",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L85-L113 |
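The sharding scheme is pure string formatting, so it can be demonstrated without any zipline imports; the function below simply restates the row's logic.

```python
# Demo of the two-level sharding above: up to 100 directories per level,
# and each leaf directory holds the files for sids sharing a four-digit
# prefix -- at most 100 files for six-digit sids.
import os


def sid_subdir_path(sid):
    padded_sid = format(sid, '06')
    return os.path.join(padded_sid[0:2], padded_sid[2:4],
                        "{0}.bcolz".format(padded_sid))


print(sid_subdir_path(1))       # 00/00/000001.bcolz (POSIX separators)
print(sid_subdir_path(123456))  # 12/34/123456.bcolz
```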
25,791 | quantopian/zipline | zipline/data/minute_bars.py | convert_cols | def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round()
scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round()
scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round()
scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round()
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes | python | def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round()
scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round()
scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round()
scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round()
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes | [
"def",
"convert_cols",
"(",
"cols",
",",
"scale_factor",
",",
"sid",
",",
"invalid_data_behavior",
")",
":",
"scaled_opens",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'open'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"scaled_highs",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'high'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"scaled_lows",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'low'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"scaled_closes",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'close'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"exclude_mask",
"=",
"np",
".",
"zeros_like",
"(",
"scaled_opens",
",",
"dtype",
"=",
"bool",
")",
"for",
"col_name",
",",
"scaled_col",
"in",
"[",
"(",
"'open'",
",",
"scaled_opens",
")",
",",
"(",
"'high'",
",",
"scaled_highs",
")",
",",
"(",
"'low'",
",",
"scaled_lows",
")",
",",
"(",
"'close'",
",",
"scaled_closes",
")",
",",
"]",
":",
"max_val",
"=",
"scaled_col",
".",
"max",
"(",
")",
"try",
":",
"check_uint32_safe",
"(",
"max_val",
",",
"col_name",
")",
"except",
"ValueError",
":",
"if",
"invalid_data_behavior",
"==",
"'raise'",
":",
"raise",
"if",
"invalid_data_behavior",
"==",
"'warn'",
":",
"logger",
".",
"warn",
"(",
"'Values for sid={}, col={} contain some too large for '",
"'uint32 (max={}), filtering them out'",
",",
"sid",
",",
"col_name",
",",
"max_val",
",",
")",
"# We want to exclude all rows that have an unsafe value in",
"# this column.",
"exclude_mask",
"&=",
"(",
"scaled_col",
">=",
"np",
".",
"iinfo",
"(",
"np",
".",
"uint32",
")",
".",
"max",
")",
"# Convert all cols to uint32.",
"opens",
"=",
"scaled_opens",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"highs",
"=",
"scaled_highs",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"lows",
"=",
"scaled_lows",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"closes",
"=",
"scaled_closes",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"volumes",
"=",
"cols",
"[",
"'volume'",
"]",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"# Exclude rows with unsafe values by setting to zero.",
"opens",
"[",
"exclude_mask",
"]",
"=",
"0",
"highs",
"[",
"exclude_mask",
"]",
"=",
"0",
"lows",
"[",
"exclude_mask",
"]",
"=",
"0",
"closes",
"[",
"exclude_mask",
"]",
"=",
"0",
"volumes",
"[",
"exclude_mask",
"]",
"=",
"0",
"return",
"opens",
",",
"highs",
",",
"lows",
",",
"closes",
",",
"volumes"
] | Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values. | [
"Adapt",
"OHLCV",
"columns",
"into",
"uint32",
"columns",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L116-L180 |
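The heart of the conversion is the scale-round-cast sequence. The toy reproduction below (names and values are illustrative, not zipline code) shows NaNs collapsing to zero and an overflowing value being masked out; note that the exclusion mask must be accumulated with `|=` so that an unsafe value in any one column excludes the whole row.

```python
# Toy reproduction of the scale/round/cast path; values illustrative.
import numpy as np

UINT32_MAX = np.iinfo(np.uint32).max
scale_factor = 1000

closes = np.array([10.001, np.nan, 5e9])     # 5e9 overflows when scaled
scaled = (np.nan_to_num(closes) * scale_factor).round()

exclude = scaled >= UINT32_MAX               # unsafe rows
scaled[exclude] = 0                          # zero before casting; the
                                             # real function casts first,
                                             # then zeros the masked rows
as_uint32 = scaled.astype(np.uint32)
print(as_uint32)                             # [10001     0     0]
```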
25,792 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarMetadata.write | def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert it from floats to integers that fit within
np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes in each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp) | python | def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert it from floats to integers that fit within
np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes in each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp) | [
"def",
"write",
"(",
"self",
",",
"rootdir",
")",
":",
"calendar",
"=",
"self",
".",
"calendar",
"slicer",
"=",
"calendar",
".",
"schedule",
".",
"index",
".",
"slice_indexer",
"(",
"self",
".",
"start_session",
",",
"self",
".",
"end_session",
",",
")",
"schedule",
"=",
"calendar",
".",
"schedule",
"[",
"slicer",
"]",
"market_opens",
"=",
"schedule",
".",
"market_open",
"market_closes",
"=",
"schedule",
".",
"market_close",
"metadata",
"=",
"{",
"'version'",
":",
"self",
".",
"version",
",",
"'ohlc_ratio'",
":",
"self",
".",
"default_ohlc_ratio",
",",
"'ohlc_ratios_per_sid'",
":",
"self",
".",
"ohlc_ratios_per_sid",
",",
"'minutes_per_day'",
":",
"self",
".",
"minutes_per_day",
",",
"'calendar_name'",
":",
"self",
".",
"calendar",
".",
"name",
",",
"'start_session'",
":",
"str",
"(",
"self",
".",
"start_session",
".",
"date",
"(",
")",
")",
",",
"'end_session'",
":",
"str",
"(",
"self",
".",
"end_session",
".",
"date",
"(",
")",
")",
",",
"# Write these values for backwards compatibility",
"'first_trading_day'",
":",
"str",
"(",
"self",
".",
"start_session",
".",
"date",
"(",
")",
")",
",",
"'market_opens'",
":",
"(",
"market_opens",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
".",
"tolist",
"(",
")",
")",
",",
"'market_closes'",
":",
"(",
"market_closes",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
".",
"tolist",
"(",
")",
")",
",",
"}",
"with",
"open",
"(",
"self",
".",
"metadata_path",
"(",
"rootdir",
")",
",",
"'w+'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"metadata",
",",
"fp",
")"
] | Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert it from floats to integers that fit within
np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes in each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch. | [
"Write",
"the",
"metadata",
"to",
"a",
"JSON",
"file",
"in",
"the",
"rootdir",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L280-L349 |
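Since the metadata is plain JSON, it can be inspected with the standard library; the path below is hypothetical and the printed values are examples only.

```python
# Reading back the metadata written above; path is hypothetical.
import json

with open('/data/minute_equities.bcolz/metadata.json') as fp:
    meta = json.load(fp)

print(meta['version'])            # e.g. 3
print(meta['calendar_name'])      # e.g. "NYSE"
print(meta['start_session'], meta['end_session'])
print(meta['minutes_per_day'])    # e.g. 390 for US equities
print(len(meta['market_opens']))  # one entry per trading session
```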
25,793 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter.open | def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
) | python | def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
) | [
"def",
"open",
"(",
"cls",
",",
"rootdir",
",",
"end_session",
"=",
"None",
")",
":",
"metadata",
"=",
"BcolzMinuteBarMetadata",
".",
"read",
"(",
"rootdir",
")",
"return",
"BcolzMinuteBarWriter",
"(",
"rootdir",
",",
"metadata",
".",
"calendar",
",",
"metadata",
".",
"start_session",
",",
"end_session",
"if",
"end_session",
"is",
"not",
"None",
"else",
"metadata",
".",
"end_session",
",",
"metadata",
".",
"minutes_per_day",
",",
"metadata",
".",
"default_ohlc_ratio",
",",
"metadata",
".",
"ohlc_ratios_per_sid",
",",
"write_metadata",
"=",
"end_session",
"is",
"not",
"None",
")"
] | Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``. | [
"Open",
"an",
"existing",
"rootdir",
"for",
"writing",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L482-L501 |
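A hedged usage sketch: reopening an existing rootdir to append data. The path is hypothetical; passing `end_session` both extends the session range and triggers a metadata rewrite, per the `write_metadata` argument above.

```python
# Assumes a rootdir previously created by a BcolzMinuteBarWriter.
import pandas as pd
from zipline.data.minute_bars import BcolzMinuteBarWriter

writer = BcolzMinuteBarWriter.open(
    '/data/minute_equities.bcolz',                     # hypothetical path
    end_session=pd.Timestamp('2016-01-05', tz='UTC'),  # new end session
)
```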
25,794 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter._init_ctable | def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table | python | def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table | [
"def",
"_init_ctable",
"(",
"self",
",",
"path",
")",
":",
"# Only create the containing subdir on creation.",
"# This is not to be confused with the `.bcolz` directory, but is the",
"# directory up one level from the `.bcolz` directories.",
"sid_containing_dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sid_containing_dirname",
")",
":",
"# Other sids may have already created the containing directory.",
"os",
".",
"makedirs",
"(",
"sid_containing_dirname",
")",
"initial_array",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"np",
".",
"uint32",
")",
"table",
"=",
"ctable",
"(",
"rootdir",
"=",
"path",
",",
"columns",
"=",
"[",
"initial_array",
",",
"initial_array",
",",
"initial_array",
",",
"initial_array",
",",
"initial_array",
",",
"]",
",",
"names",
"=",
"[",
"'open'",
",",
"'high'",
",",
"'low'",
",",
"'close'",
",",
"'volume'",
"]",
",",
"expectedlen",
"=",
"self",
".",
"_expectedlen",
",",
"mode",
"=",
"'w'",
",",
")",
"table",
".",
"flush",
"(",
")",
"return",
"table"
] | Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable. | [
"Create",
"empty",
"ctable",
"for",
"given",
"path",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L560-L597 |
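The same empty-table setup can be reproduced directly against bcolz. Everything below is a minimal sketch: the path is hypothetical and the `expectedlen` of roughly one year of US-equity minutes is an assumption.

```python
# Minimal standalone version of the table initialization above.
import numpy as np
import bcolz

initial_array = np.empty(0, np.uint32)
table = bcolz.ctable(
    rootdir='000001.bcolz',        # hypothetical path
    columns=[initial_array] * 5,   # one empty uint32 column per field
    names=['open', 'high', 'low', 'close', 'volume'],
    expectedlen=390 * 252,         # assumed: ~1 year of minute bars
    mode='w',
)
table.flush()                      # persist the (empty) layout to disk
```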
25,795 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter._ensure_ctable | def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a') | python | def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a') | [
"def",
"_ensure_ctable",
"(",
"self",
",",
"sid",
")",
":",
"sidpath",
"=",
"self",
".",
"sidpath",
"(",
"sid",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sidpath",
")",
":",
"return",
"self",
".",
"_init_ctable",
"(",
"sidpath",
")",
"return",
"bcolz",
".",
"ctable",
"(",
"rootdir",
"=",
"sidpath",
",",
"mode",
"=",
"'a'",
")"
] | Ensure that a ctable exists for ``sid``, then return it. | [
"Ensure",
"that",
"a",
"ctable",
"exists",
"for",
"sid",
"then",
"return",
"it",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L599-L604 |
25,796 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter.pad | def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done, `last_date_in_output_for_sid` will be equal to `date`.
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date == pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date) | python | def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done, `last_date_in_output_for_sid` will be equal to `date`.
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date == pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date) | [
"def",
"pad",
"(",
"self",
",",
"sid",
",",
"date",
")",
":",
"table",
"=",
"self",
".",
"_ensure_ctable",
"(",
"sid",
")",
"last_date",
"=",
"self",
".",
"last_date_in_output_for_sid",
"(",
"sid",
")",
"tds",
"=",
"self",
".",
"_session_labels",
"if",
"date",
"<=",
"last_date",
"or",
"date",
"<",
"tds",
"[",
"0",
"]",
":",
"# No need to pad.",
"return",
"if",
"last_date",
"==",
"pd",
".",
"NaT",
":",
"# If there is no data, determine how many days to add so that",
"# desired days are written to the correct slots.",
"days_to_zerofill",
"=",
"tds",
"[",
"tds",
".",
"slice_indexer",
"(",
"end",
"=",
"date",
")",
"]",
"else",
":",
"days_to_zerofill",
"=",
"tds",
"[",
"tds",
".",
"slice_indexer",
"(",
"start",
"=",
"last_date",
"+",
"tds",
".",
"freq",
",",
"end",
"=",
"date",
")",
"]",
"self",
".",
"_zerofill",
"(",
"table",
",",
"len",
"(",
"days_to_zerofill",
")",
")",
"new_last_date",
"=",
"self",
".",
"last_date_in_output_for_sid",
"(",
"sid",
")",
"assert",
"new_last_date",
"==",
"date",
",",
"\"new_last_date={0} != date={1}\"",
".",
"format",
"(",
"new_last_date",
",",
"date",
")"
] | Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done, `last_date_in_output_for_sid` will be equal to `date`. | [
"Fill",
"sid",
"container",
"with",
"empty",
"data",
"through",
"the",
"specified",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L618-L659 |
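The amount of padding is simply the number of sessions to cover times the fixed minutes per day; in miniature (numbers are illustrative):

```python
# Illustrative arithmetic behind _zerofill's length argument.
minutes_per_day = 390    # US equities
days_to_zerofill = 3     # sessions between last written day and `date`
print(days_to_zerofill * minutes_per_day)  # 1170 zero rows appended
```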
25,797 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter.set_sid_attrs | def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v | python | def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v | [
"def",
"set_sid_attrs",
"(",
"self",
",",
"sid",
",",
"*",
"*",
"kwargs",
")",
":",
"table",
"=",
"self",
".",
"_ensure_ctable",
"(",
"sid",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"table",
".",
"attrs",
"[",
"k",
"]",
"=",
"v"
] | Write all the supplied kwargs as attributes of the sid's file. | [
"Write",
"all",
"the",
"supplied",
"kwargs",
"as",
"attributes",
"of",
"the",
"sid",
"s",
"file",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L661-L666 |
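A hedged usage sketch; the rootdir and attribute names are illustrative, since anything passed as keyword arguments is stored verbatim on the sid's table.

```python
# Assumes an existing rootdir; path and attribute names hypothetical.
from zipline.data.minute_bars import BcolzMinuteBarWriter

writer = BcolzMinuteBarWriter.open('/data/minute_equities.bcolz')
writer.set_sid_attrs(24, start_day='2015-01-02', end_day='2015-12-31')
```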
25,798 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter.write | def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior) | python | def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior) | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"show_progress",
"=",
"False",
",",
"invalid_data_behavior",
"=",
"'warn'",
")",
":",
"ctx",
"=",
"maybe_show_progress",
"(",
"data",
",",
"show_progress",
"=",
"show_progress",
",",
"item_show_func",
"=",
"lambda",
"e",
":",
"e",
"if",
"e",
"is",
"None",
"else",
"str",
"(",
"e",
"[",
"0",
"]",
")",
",",
"label",
"=",
"\"Merging minute equity files:\"",
",",
")",
"write_sid",
"=",
"self",
".",
"write_sid",
"with",
"ctx",
"as",
"it",
":",
"for",
"e",
"in",
"it",
":",
"write_sid",
"(",
"*",
"e",
",",
"invalid_data_behavior",
"=",
"invalid_data_behavior",
")"
] | Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing. | [
"Write",
"a",
"stream",
"of",
"minute",
"data",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L668-L697 |
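A hedged end-to-end sketch of the expected input shape: an iterable of (sid, frame) pairs where each frame is OHLCV indexed by market minutes. The rootdir, sid, and prices are all illustrative.

```python
# Assumes an existing rootdir created for the NYSE calendar.
import pandas as pd
from zipline.data.minute_bars import BcolzMinuteBarWriter

writer = BcolzMinuteBarWriter.open('/data/minute_equities.bcolz')

minutes = pd.date_range('2015-01-06 14:31', periods=3,
                        freq='min', tz='UTC')  # first NYSE minutes, UTC
frame = pd.DataFrame(
    {'open':   [10.0, 10.1, 10.2],
     'high':   [10.1, 10.2, 10.3],
     'low':    [9.9, 10.0, 10.1],
     'close':  [10.1, 10.2, 10.3],
     'volume': [100, 200, 300]},
    index=minutes,
)
writer.write([(24, frame)], show_progress=True)
```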
25,799 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter.data_len_for_day | def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day | python | def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day | [
"def",
"data_len_for_day",
"(",
"self",
",",
"day",
")",
":",
"day_ix",
"=",
"self",
".",
"_session_labels",
".",
"get_loc",
"(",
"day",
")",
"# Add one to the 0-indexed day_ix to get the number of days.",
"num_days",
"=",
"day_ix",
"+",
"1",
"return",
"num_days",
"*",
"self",
".",
"_minutes_per_day"
] | Return the number of data points up to and including the
provided day. | [
"Return",
"the",
"number",
"of",
"data",
"points",
"up",
"to",
"and",
"including",
"the",
"provided",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L846-L854 |
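The offset math in miniature: with 390 minutes per day, the data length through the fifth session (0-indexed day 4) is 1950 rows.

```python
# Illustrative check of the day-index arithmetic above.
day_ix = 4             # 0-indexed position of the day in session labels
minutes_per_day = 390
print((day_ix + 1) * minutes_per_day)  # 1950
```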