id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
|---|---|---|---|---|---|---|---|---|---|---|---|
25,900 | quantopian/zipline | zipline/pipeline/factors/basic.py | _ExponentialWeightedFactor.from_halflife | def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of
half-life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[EquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) | python | def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of
half-life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[EquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) | [
"def",
"from_halflife",
"(",
"cls",
",",
"inputs",
",",
"window_length",
",",
"halflife",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"halflife",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"`span` must be a positive number. %s was passed.\"",
"%",
"halflife",
")",
"decay_rate",
"=",
"exp",
"(",
"log",
"(",
".5",
")",
"/",
"halflife",
")",
"assert",
"0.0",
"<",
"decay_rate",
"<=",
"1.0",
"return",
"cls",
"(",
"inputs",
"=",
"inputs",
",",
"window_length",
"=",
"window_length",
",",
"decay_rate",
"=",
"decay_rate",
",",
"*",
"*",
"kwargs",
")"
] | Convenience constructor for passing ``decay_rate`` in terms of
half-life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[EquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`. | [
"Convenience",
"constructor",
"for",
"passing",
"decay_rate",
"in",
"terms",
"of",
"half",
"life",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L244-L286 |
25,901 | quantopian/zipline | zipline/pipeline/factors/basic.py | _ExponentialWeightedFactor.from_center_of_mass | def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[EquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
) | python | def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[EquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
) | [
"def",
"from_center_of_mass",
"(",
"cls",
",",
"inputs",
",",
"window_length",
",",
"center_of_mass",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"cls",
"(",
"inputs",
"=",
"inputs",
",",
"window_length",
"=",
"window_length",
",",
"decay_rate",
"=",
"(",
"1.0",
"-",
"(",
"1.0",
"/",
"(",
"1.0",
"+",
"center_of_mass",
")",
")",
")",
",",
"*",
"*",
"kwargs",
")"
] | Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[EquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`. | [
"Convenience",
"constructor",
"for",
"passing",
"decay_rate",
"in",
"terms",
"of",
"center",
"of",
"mass",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L289-L328 |
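The center-of-mass conversion admits the same check: pandas defines ``alpha = 1 / (1 + com)``, and zipline's ``decay_rate`` is ``1 - alpha``. A hedged sketch, not zipline code:

```python
import numpy as np
import pandas as pd

center_of_mass = 15
decay_rate = 1.0 - (1.0 / (1.0 + center_of_mass))  # == 1 - alpha

s = pd.Series(np.random.default_rng(1).standard_normal(100))
assert np.allclose(
    s.ewm(com=center_of_mass).mean(),
    s.ewm(alpha=1 - decay_rate).mean(),
)
```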
25,902 | quantopian/zipline | zipline/utils/math_utils.py | tolerant_equals | def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False):
"""Check if a and b are equal with some tolerance.
Parameters
----------
a, b : float
The floats to check for equality.
atol : float, optional
The absolute tolerance.
rtol : float, optional
The relative tolerance.
equal_nan : bool, optional
Should NaN compare equal?
See Also
--------
numpy.isclose
Notes
-----
This function is just a scalar version of numpy.isclose for performance.
See the docstring of ``isclose`` for more information about ``atol`` and
``rtol``.
"""
if equal_nan and isnan(a) and isnan(b):
return True
return math.fabs(a - b) <= (atol + rtol * math.fabs(b)) | python | def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False):
"""Check if a and b are equal with some tolerance.
Parameters
----------
a, b : float
The floats to check for equality.
atol : float, optional
The absolute tolerance.
rtol : float, optional
The relative tolerance.
equal_nan : bool, optional
Should NaN compare equal?
See Also
--------
numpy.isclose
Notes
-----
This function is just a scalar version of numpy.isclose for performance.
See the docstring of ``isclose`` for more information about ``atol`` and
``rtol``.
"""
if equal_nan and isnan(a) and isnan(b):
return True
return math.fabs(a - b) <= (atol + rtol * math.fabs(b)) | [
"def",
"tolerant_equals",
"(",
"a",
",",
"b",
",",
"atol",
"=",
"10e-7",
",",
"rtol",
"=",
"10e-7",
",",
"equal_nan",
"=",
"False",
")",
":",
"if",
"equal_nan",
"and",
"isnan",
"(",
"a",
")",
"and",
"isnan",
"(",
"b",
")",
":",
"return",
"True",
"return",
"math",
".",
"fabs",
"(",
"a",
"-",
"b",
")",
"<=",
"(",
"atol",
"+",
"rtol",
"*",
"math",
".",
"fabs",
"(",
"b",
")",
")"
] | Check if a and b are equal with some tolerance.
Parameters
----------
a, b : float
The floats to check for equality.
atol : float, optional
The absolute tolerance.
rtol : float, optional
The relative tolerance.
equal_nan : bool, optional
Should NaN compare equal?
See Also
--------
numpy.isclose
Notes
-----
This function is just a scalar version of numpy.isclose for performance.
See the docstring of ``isclose`` for more information about ``atol`` and
``rtol``. | [
"Check",
"if",
"a",
"and",
"b",
"are",
"equal",
"with",
"some",
"tolerance",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/math_utils.py#L21-L47 |
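The row's snippet relies on module-level imports it does not show (``math`` and an ``isnan``; the exact originals are an assumption here). A self-contained version behaves like ``numpy.isclose`` on scalars:

```python
import math
import numpy as np

def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False):
    # Scalar restatement of numpy.isclose's |a - b| <= atol + rtol*|b| test.
    if equal_nan and math.isnan(a) and math.isnan(b):
        return True
    return math.fabs(a - b) <= (atol + rtol * math.fabs(b))

assert tolerant_equals(1.0, 1.0 + 5e-7)
assert not tolerant_equals(1.0, 1.01)
assert tolerant_equals(float('nan'), float('nan'), equal_nan=True)
# Same verdict as the array version:
assert np.isclose(1.0, 1.0 + 5e-7, rtol=10e-7, atol=10e-7)
```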
25,903 | quantopian/zipline | zipline/utils/math_utils.py | round_if_near_integer | def round_if_near_integer(a, epsilon=1e-4):
"""
Round a to the nearest integer if that integer is within an epsilon
of a.
"""
if abs(a - round(a)) <= epsilon:
return round(a)
else:
return a | python | def round_if_near_integer(a, epsilon=1e-4):
"""
Round a to the nearest integer if that integer is within an epsilon
of a.
"""
if abs(a - round(a)) <= epsilon:
return round(a)
else:
return a | [
"def",
"round_if_near_integer",
"(",
"a",
",",
"epsilon",
"=",
"1e-4",
")",
":",
"if",
"abs",
"(",
"a",
"-",
"round",
"(",
"a",
")",
")",
"<=",
"epsilon",
":",
"return",
"round",
"(",
"a",
")",
"else",
":",
"return",
"a"
] | Round a to the nearest integer if that integer is within an epsilon
of a. | [
"Round",
"a",
"to",
"the",
"nearest",
"integer",
"if",
"that",
"integer",
"is",
"within",
"an",
"epsilon",
"of",
"a",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/math_utils.py#L72-L80 |
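A few worked cases make the epsilon behavior concrete (self-contained copy for illustration):

```python
def round_if_near_integer(a, epsilon=1e-4):
    # Snap to the nearest integer only when already within epsilon of it.
    if abs(a - round(a)) <= epsilon:
        return round(a)
    return a

assert round_if_near_integer(2.99999) == 3      # within 1e-4 of 3
assert round_if_near_integer(100.00005) == 100
assert round_if_near_integer(2.5001) == 2.5001  # outside epsilon: unchanged
```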
25,904 | quantopian/zipline | zipline/pipeline/factors/factor.py | binop_return_dtype | def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype | python | def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype | [
"def",
"binop_return_dtype",
"(",
"op",
",",
"left",
",",
"right",
")",
":",
"if",
"is_comparison",
"(",
"op",
")",
":",
"if",
"left",
"!=",
"right",
":",
"raise",
"TypeError",
"(",
"\"Don't know how to compute {left} {op} {right}.\\n\"",
"\"Comparisons are only supported between Factors of equal \"",
"\"dtypes.\"",
".",
"format",
"(",
"left",
"=",
"left",
",",
"op",
"=",
"op",
",",
"right",
"=",
"right",
")",
")",
"return",
"bool_dtype",
"elif",
"left",
"!=",
"float64_dtype",
"or",
"right",
"!=",
"float64_dtype",
":",
"raise",
"TypeError",
"(",
"\"Don't know how to compute {left} {op} {right}.\\n\"",
"\"Arithmetic operators are only supported between Factors of \"",
"\"dtype 'float64'.\"",
".",
"format",
"(",
"left",
"=",
"left",
".",
"name",
",",
"op",
"=",
"op",
",",
"right",
"=",
"right",
".",
"name",
",",
")",
")",
"return",
"float64_dtype"
] | Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`. | [
"Compute",
"the",
"expected",
"return",
"dtype",
"for",
"the",
"given",
"binary",
"operator",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L101-L138 |
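The two dtype rules can be exercised directly. In this condensed, self-contained restatement, ``is_comparison`` and the dtype constants are hypothetical stand-ins for zipline's internals:

```python
import numpy as np

bool_dtype = np.dtype(bool)          # stand-in for zipline's bool_dtype
float64_dtype = np.dtype('float64')  # stand-in for zipline's float64_dtype

def is_comparison(op):
    # Hypothetical stand-in; zipline keeps its own operator tables.
    return op in {'<', '<=', '!=', '==', '>=', '>'}

def binop_return_dtype(op, left, right):
    # Condensed restatement of the row's logic.
    if is_comparison(op):
        if left != right:
            raise TypeError("comparisons require equal dtypes")
        return bool_dtype
    if left != float64_dtype or right != float64_dtype:
        raise TypeError("arithmetic requires float64 on both sides")
    return float64_dtype

assert binop_return_dtype('<', float64_dtype, float64_dtype) == bool_dtype
assert binop_return_dtype('+', float64_dtype, float64_dtype) == float64_dtype
```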
25,905 | quantopian/zipline | zipline/pipeline/factors/factor.py | binary_operator | def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator | python | def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator | [
"def",
"binary_operator",
"(",
"op",
")",
":",
"# When combining a Factor with a NumericalExpression, we use this",
"# attrgetter instance to defer to the commuted implementation of the",
"# NumericalExpression operator.",
"commuted_method_getter",
"=",
"attrgetter",
"(",
"method_name_for_op",
"(",
"op",
",",
"commute",
"=",
"True",
")",
")",
"@",
"with_doc",
"(",
"\"Binary Operator: '%s'\"",
"%",
"op",
")",
"@",
"with_name",
"(",
"method_name_for_op",
"(",
"op",
")",
")",
"@",
"coerce_numbers_to_my_dtype",
"def",
"binary_operator",
"(",
"self",
",",
"other",
")",
":",
"# This can't be hoisted up a scope because the types returned by",
"# binop_return_type aren't defined when the top-level function is",
"# invoked in the class body of Factor.",
"return_type",
"=",
"binop_return_type",
"(",
"op",
")",
"if",
"isinstance",
"(",
"self",
",",
"NumExprFactor",
")",
":",
"self_expr",
",",
"other_expr",
",",
"new_inputs",
"=",
"self",
".",
"build_binary_op",
"(",
"op",
",",
"other",
",",
")",
"return",
"return_type",
"(",
"\"({left}) {op} ({right})\"",
".",
"format",
"(",
"left",
"=",
"self_expr",
",",
"op",
"=",
"op",
",",
"right",
"=",
"other_expr",
",",
")",
",",
"new_inputs",
",",
"dtype",
"=",
"binop_return_dtype",
"(",
"op",
",",
"self",
".",
"dtype",
",",
"other",
".",
"dtype",
")",
",",
")",
"elif",
"isinstance",
"(",
"other",
",",
"NumExprFactor",
")",
":",
"# NumericalExpression overrides ops to correctly handle merging of",
"# inputs. Look up and call the appropriate reflected operator with",
"# ourself as the input.",
"return",
"commuted_method_getter",
"(",
"other",
")",
"(",
"self",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Term",
")",
":",
"if",
"self",
"is",
"other",
":",
"return",
"return_type",
"(",
"\"x_0 {op} x_0\"",
".",
"format",
"(",
"op",
"=",
"op",
")",
",",
"(",
"self",
",",
")",
",",
"dtype",
"=",
"binop_return_dtype",
"(",
"op",
",",
"self",
".",
"dtype",
",",
"other",
".",
"dtype",
")",
",",
")",
"return",
"return_type",
"(",
"\"x_0 {op} x_1\"",
".",
"format",
"(",
"op",
"=",
"op",
")",
",",
"(",
"self",
",",
"other",
")",
",",
"dtype",
"=",
"binop_return_dtype",
"(",
"op",
",",
"self",
".",
"dtype",
",",
"other",
".",
"dtype",
")",
",",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"return",
"return_type",
"(",
"\"x_0 {op} ({constant})\"",
".",
"format",
"(",
"op",
"=",
"op",
",",
"constant",
"=",
"other",
")",
",",
"binds",
"=",
"(",
"self",
",",
")",
",",
"# .dtype access is safe here because coerce_numbers_to_my_dtype",
"# will convert any input numbers to numpy equivalents.",
"dtype",
"=",
"binop_return_dtype",
"(",
"op",
",",
"self",
".",
"dtype",
",",
"other",
".",
"dtype",
")",
")",
"raise",
"BadBinaryOperator",
"(",
"op",
",",
"self",
",",
"other",
")",
"return",
"binary_operator"
] | Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__. | [
"Factory",
"function",
"for",
"making",
"binary",
"operator",
"methods",
"on",
"a",
"Factor",
"subclass",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L141-L201 |
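The closure-based factory pattern in this row (one function that manufactures ``__add__``, ``__sub__``, etc.) can be shown in miniature, independent of zipline's NumExpr machinery. ``Expr`` below is a hypothetical toy class, not zipline's:

```python
class Expr:
    """Toy expression tree built by factory-generated operator methods."""
    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return self.text

def make_binary_operator(op):
    # Each generated method closes over `op`, mirroring how the row's
    # factory builds one method per operator symbol.
    def method(self, other):
        other_text = other.text if isinstance(other, Expr) else repr(other)
        return Expr('({}) {} ({})'.format(self.text, op, other_text))
    method.__name__ = '__{}__'.format({'+': 'add', '-': 'sub'}[op])
    return method

for _op in ('+', '-'):
    _method = make_binary_operator(_op)
    setattr(Expr, _method.__name__, _method)

print(Expr('x_0') + Expr('x_1') - 2)  # ((x_0) + (x_1)) - (2)
```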
25,906 | quantopian/zipline | zipline/pipeline/factors/factor.py | reflected_binary_operator | def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator | python | def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator | [
"def",
"reflected_binary_operator",
"(",
"op",
")",
":",
"assert",
"not",
"is_comparison",
"(",
"op",
")",
"@",
"with_name",
"(",
"method_name_for_op",
"(",
"op",
",",
"commute",
"=",
"True",
")",
")",
"@",
"coerce_numbers_to_my_dtype",
"def",
"reflected_binary_operator",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"NumericalExpression",
")",
":",
"self_expr",
",",
"other_expr",
",",
"new_inputs",
"=",
"self",
".",
"build_binary_op",
"(",
"op",
",",
"other",
")",
"return",
"NumExprFactor",
"(",
"\"({left}) {op} ({right})\"",
".",
"format",
"(",
"left",
"=",
"other_expr",
",",
"right",
"=",
"self_expr",
",",
"op",
"=",
"op",
",",
")",
",",
"new_inputs",
",",
"dtype",
"=",
"binop_return_dtype",
"(",
"op",
",",
"other",
".",
"dtype",
",",
"self",
".",
"dtype",
")",
")",
"# Only have to handle the numeric case because in all other valid cases",
"# the corresponding left-binding method will be called.",
"elif",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"return",
"NumExprFactor",
"(",
"\"{constant} {op} x_0\"",
".",
"format",
"(",
"op",
"=",
"op",
",",
"constant",
"=",
"other",
")",
",",
"binds",
"=",
"(",
"self",
",",
")",
",",
"dtype",
"=",
"binop_return_dtype",
"(",
"op",
",",
"other",
".",
"dtype",
",",
"self",
".",
"dtype",
")",
",",
")",
"raise",
"BadBinaryOperator",
"(",
"op",
",",
"other",
",",
"self",
")",
"return",
"reflected_binary_operator"
] | Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__. | [
"Factory",
"function",
"for",
"making",
"binary",
"operator",
"methods",
"on",
"a",
"Factor",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L204-L240 |
25,907 | quantopian/zipline | zipline/pipeline/factors/factor.py | unary_operator | def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator | python | def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator | [
"def",
"unary_operator",
"(",
"op",
")",
":",
"# Only negate is currently supported.",
"valid_ops",
"=",
"{",
"'-'",
"}",
"if",
"op",
"not",
"in",
"valid_ops",
":",
"raise",
"ValueError",
"(",
"\"Invalid unary operator %s.\"",
"%",
"op",
")",
"@",
"with_doc",
"(",
"\"Unary Operator: '%s'\"",
"%",
"op",
")",
"@",
"with_name",
"(",
"unary_op_name",
"(",
"op",
")",
")",
"def",
"unary_operator",
"(",
"self",
")",
":",
"if",
"self",
".",
"dtype",
"!=",
"float64_dtype",
":",
"raise",
"TypeError",
"(",
"\"Can't apply unary operator {op!r} to instance of \"",
"\"{typename!r} with dtype {dtypename!r}.\\n\"",
"\"{op!r} is only supported for Factors of dtype \"",
"\"'float64'.\"",
".",
"format",
"(",
"op",
"=",
"op",
",",
"typename",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"dtypename",
"=",
"self",
".",
"dtype",
".",
"name",
",",
")",
")",
"# This can't be hoisted up a scope because the types returned by",
"# unary_op_return_type aren't defined when the top-level function is",
"# invoked.",
"if",
"isinstance",
"(",
"self",
",",
"NumericalExpression",
")",
":",
"return",
"NumExprFactor",
"(",
"\"{op}({expr})\"",
".",
"format",
"(",
"op",
"=",
"op",
",",
"expr",
"=",
"self",
".",
"_expr",
")",
",",
"self",
".",
"inputs",
",",
"dtype",
"=",
"float64_dtype",
",",
")",
"else",
":",
"return",
"NumExprFactor",
"(",
"\"{op}x_0\"",
".",
"format",
"(",
"op",
"=",
"op",
")",
",",
"(",
"self",
",",
")",
",",
"dtype",
"=",
"float64_dtype",
",",
")",
"return",
"unary_operator"
] | Factory function for making unary operator methods for Factors. | [
"Factory",
"function",
"for",
"making",
"unary",
"operator",
"methods",
"for",
"Factors",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L243-L282 |
25,908 | quantopian/zipline | zipline/pipeline/factors/factor.py | function_application | def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_doc(func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc | python | def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_doc(func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc | [
"def",
"function_application",
"(",
"func",
")",
":",
"if",
"func",
"not",
"in",
"NUMEXPR_MATH_FUNCS",
":",
"raise",
"ValueError",
"(",
"\"Unsupported mathematical function '%s'\"",
"%",
"func",
")",
"@",
"with_doc",
"(",
"func",
")",
"@",
"with_name",
"(",
"func",
")",
"def",
"mathfunc",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"NumericalExpression",
")",
":",
"return",
"NumExprFactor",
"(",
"\"{func}({expr})\"",
".",
"format",
"(",
"func",
"=",
"func",
",",
"expr",
"=",
"self",
".",
"_expr",
")",
",",
"self",
".",
"inputs",
",",
"dtype",
"=",
"float64_dtype",
",",
")",
"else",
":",
"return",
"NumExprFactor",
"(",
"\"{func}(x_0)\"",
".",
"format",
"(",
"func",
"=",
"func",
")",
",",
"(",
"self",
",",
")",
",",
"dtype",
"=",
"float64_dtype",
",",
")",
"return",
"mathfunc"
] | Factory function for producing function application methods for Factor
subclasses. | [
"Factory",
"function",
"for",
"producing",
"function",
"application",
"methods",
"for",
"Factor",
"subclasses",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L285-L308 |
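These generated expression strings are ultimately evaluated by numexpr, which is what makes the string-building worthwhile: the whole expression runs in one vectorized pass. A minimal standalone illustration of that evaluation step (assuming ``numexpr`` is installed):

```python
import numexpr
import numpy as np

x_0 = np.array([1.0, 4.0, 9.0])
# NumExprFactor builds strings like "sqrt(x_0)" and hands them, together
# with the bound inputs, to numexpr for evaluation.
result = numexpr.evaluate("sqrt(x_0)", local_dict={'x_0': x_0})
assert np.allclose(result, np.sqrt(x_0))
```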
25,909 | quantopian/zipline | zipline/pipeline/factors/factor.py | winsorize | def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a | python | def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a | [
"def",
"winsorize",
"(",
"row",
",",
"min_percentile",
",",
"max_percentile",
")",
":",
"a",
"=",
"row",
".",
"copy",
"(",
")",
"nan_count",
"=",
"isnan",
"(",
"row",
")",
".",
"sum",
"(",
")",
"nonnan_count",
"=",
"a",
".",
"size",
"-",
"nan_count",
"# NOTE: argsort() sorts nans to the end of the array.",
"idx",
"=",
"a",
".",
"argsort",
"(",
")",
"# Set values at indices below the min percentile to the value of the entry",
"# at the cutoff.",
"if",
"min_percentile",
">",
"0",
":",
"lower_cutoff",
"=",
"int",
"(",
"min_percentile",
"*",
"nonnan_count",
")",
"a",
"[",
"idx",
"[",
":",
"lower_cutoff",
"]",
"]",
"=",
"a",
"[",
"idx",
"[",
"lower_cutoff",
"]",
"]",
"# Set values at indices above the max percentile to the value of the entry",
"# at the cutoff.",
"if",
"max_percentile",
"<",
"1",
":",
"upper_cutoff",
"=",
"int",
"(",
"ceil",
"(",
"nonnan_count",
"*",
"max_percentile",
")",
")",
"# if max_percentile is close to 1, then upper_cutoff might not",
"# remove any values.",
"if",
"upper_cutoff",
"<",
"nonnan_count",
":",
"start_of_nans",
"=",
"(",
"-",
"nan_count",
")",
"if",
"nan_count",
"else",
"None",
"a",
"[",
"idx",
"[",
"upper_cutoff",
":",
"start_of_nans",
"]",
"]",
"=",
"a",
"[",
"idx",
"[",
"upper_cutoff",
"-",
"1",
"]",
"]",
"return",
"a"
] | This implementation is based on scipy.stats.mstats.winsorize | [
"This",
"implementation",
"is",
"based",
"on",
"scipy",
".",
"stats",
".",
"mstats",
".",
"winsorize"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1671-L1698 |
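A worked example shows the clipping behavior. The function is copied here with its imports filled in (the originals are an assumption) so the block runs standalone:

```python
from math import ceil

import numpy as np
from numpy import isnan

def winsorize(row, min_percentile, max_percentile):
    a = row.copy()
    nan_count = isnan(row).sum()
    nonnan_count = a.size - nan_count
    idx = a.argsort()  # NaNs sort to the end
    if min_percentile > 0:
        lower_cutoff = int(min_percentile * nonnan_count)
        a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
    if max_percentile < 1:
        upper_cutoff = int(ceil(nonnan_count * max_percentile))
        if upper_cutoff < nonnan_count:
            start_of_nans = (-nan_count) if nan_count else None
            a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
    return a

row = np.arange(1.0, 11.0)       # [1.0, 2.0, ..., 10.0]
print(winsorize(row, 0.1, 0.9))  # [2. 2. 3. 4. 5. 6. 7. 8. 9. 9.]
```

The lowest decile is pulled up to the entry at the lower cutoff and the top decile pulled down, mirroring ``scipy.stats.mstats.winsorize`` with ``limits=(0.1, 0.1)``.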
25,910 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.demean | def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will
be written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. AAPL and MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
) | python | def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will
be written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. AAPL and MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
) | [
"def",
"demean",
"(",
"self",
",",
"mask",
"=",
"NotSpecified",
",",
"groupby",
"=",
"NotSpecified",
")",
":",
"return",
"GroupedRowTransform",
"(",
"transform",
"=",
"demean",
",",
"transform_args",
"=",
"(",
")",
",",
"factor",
"=",
"self",
",",
"groupby",
"=",
"groupby",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"window_safe",
"=",
"self",
".",
"window_safe",
",",
"mask",
"=",
"mask",
",",
")"
] | Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will
be written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. AAPL and MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby` | [
"Construct",
"a",
"Factor",
"that",
"computes",
"self",
"and",
"subtracts",
"the",
"mean",
"from",
"row",
"of",
"the",
"result",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L402-L524 |
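The grouped de-meaning described above is easy to reproduce with pandas on the docstring's own example data. This sketches the semantics only, not zipline's implementation:

```python
import pandas as pd

f = pd.DataFrame(
    {'AAPL': [1.0, 1.5], 'MSFT': [2.0, 2.5],
     'MCD': [3.0, 3.5], 'BK': [4.0, 1.0]},
    index=['2017-03-13', '2017-03-14'],
)
groups = pd.Series({'AAPL': 1, 'MSFT': 1, 'MCD': 2, 'BK': 2})

# Per-row group means, broadcast back to each column. Transposing puts
# the grouping labels along the index, mimicking f.demean(groupby=c).
group_means = f.T.groupby(groups).transform('mean').T
print(f - group_means)
#             AAPL  MSFT   MCD    BK
# 2017-03-13  -0.5   0.5  -0.5   0.5
# 2017-03-14  -0.5   0.5  1.25 -1.25
```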
25,911 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.zscore | def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of self.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
) | python | def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of self.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
) | [
"def",
"zscore",
"(",
"self",
",",
"mask",
"=",
"NotSpecified",
",",
"groupby",
"=",
"NotSpecified",
")",
":",
"return",
"GroupedRowTransform",
"(",
"transform",
"=",
"zscore",
",",
"transform_args",
"=",
"(",
")",
",",
"factor",
"=",
"self",
",",
"groupby",
"=",
"groupby",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"mask",
"=",
"mask",
",",
"window_safe",
"=",
"True",
",",
")"
] | Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of self.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby` | [
"Construct",
"a",
"Factor",
"that",
"Z",
"-",
"Scores",
"each",
"day",
"s",
"results",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L531-L591 |
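The row-wise definition ``(row - row.mean()) / row.stddev()`` in plain NumPy, NaN-aware. ``nanstd`` defaults to population std here; that zipline uses the same ddof is an assumption:

```python
import numpy as np

def zscore_rows(data):
    # Z-score each row, ignoring NaNs when computing the statistics.
    mean = np.nanmean(data, axis=1, keepdims=True)
    std = np.nanstd(data, axis=1, keepdims=True)
    return (data - mean) / std

data = np.array([[1.0, 2.0, 3.0, 4.0]])
print(zscore_rows(data))  # approx [[-1.342 -0.447  0.447  1.342]]
```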
25,912 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.rank | def rank(self,
method='ordinal',
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank`
"""
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
) | python | def rank(self,
method='ordinal',
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank`
"""
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
) | [
"def",
"rank",
"(",
"self",
",",
"method",
"=",
"'ordinal'",
",",
"ascending",
"=",
"True",
",",
"mask",
"=",
"NotSpecified",
",",
"groupby",
"=",
"NotSpecified",
")",
":",
"if",
"groupby",
"is",
"NotSpecified",
":",
"return",
"Rank",
"(",
"self",
",",
"method",
"=",
"method",
",",
"ascending",
"=",
"ascending",
",",
"mask",
"=",
"mask",
")",
"return",
"GroupedRowTransform",
"(",
"transform",
"=",
"rankdata",
"if",
"ascending",
"else",
"rankdata_1d_descending",
",",
"transform_args",
"=",
"(",
"method",
",",
")",
",",
"factor",
"=",
"self",
",",
"groupby",
"=",
"groupby",
",",
"dtype",
"=",
"float64_dtype",
",",
"missing_value",
"=",
"nan",
",",
"mask",
"=",
"mask",
",",
"window_safe",
"=",
"True",
",",
")"
] | Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank` | [
"Construct",
"a",
"new",
"Factor",
"representing",
"the",
"sorted",
"rank",
"of",
"each",
"column",
"within",
"each",
"row",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L593-L651 |
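For intuition, here is a minimal runnable sketch of the ascending and descending ranking paths described above, using plain ``scipy.stats.rankdata`` on a single row. Negating the input is a stand-in for the ``rankdata_1d_descending`` helper; that equivalence is an assumption made for illustration, not the library's actual code.

.. code-block:: python

    import numpy as np
    from scipy.stats import rankdata

    row = np.array([3.0, 1.0, 2.0])

    # Ascending ordinal ranks: the smallest value gets rank 1.
    asc = rankdata(row, method='ordinal')    # array([3, 1, 2])

    # Descending ranks via negation, standing in for the
    # rankdata_1d_descending helper referenced above (assumption).
    desc = rankdata(-row, method='ordinal')  # array([1, 3, 2])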
25,913 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.pearsonr | def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
"""
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) | python | def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
"""
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) | [
"def",
"pearsonr",
"(",
"self",
",",
"target",
",",
"correlation_length",
",",
"mask",
"=",
"NotSpecified",
")",
":",
"from",
".",
"statistical",
"import",
"RollingPearson",
"return",
"RollingPearson",
"(",
"base_factor",
"=",
"self",
",",
"target",
"=",
"target",
",",
"correlation_length",
"=",
"correlation_length",
",",
"mask",
"=",
"mask",
",",
")"
] | Construct a new Factor that computes rolling pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr` | [
"Construct",
"a",
"new",
"Factor",
"that",
"computes",
"rolling",
"pearson",
"correlation",
"coefficients",
"between",
"target",
"and",
"the",
"columns",
"of",
"self",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L656-L716 |
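As a rough, runnable illustration of the per-window computation documented above, this NumPy sketch correlates one target series against several columns over a single 30-day window. It mirrors the described semantics only; zipline's actual implementation is vectorized differently.

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    window = 30

    target = rng.normal(size=window)        # stand-in for a returns slice
    columns = rng.normal(size=(window, 5))  # stand-in for 5 assets' returns

    # One Pearson coefficient per column, as a rolling correlation
    # factor would emit for a single output row.
    corrs = np.array([
        np.corrcoef(target, columns[:, j])[0, 1]
        for j in range(columns.shape[1])
    ])
    print(corrs.shape)  # (5,)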
25,914 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.spearmanr | def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) | python | def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) | [
"def",
"spearmanr",
"(",
"self",
",",
"target",
",",
"correlation_length",
",",
"mask",
"=",
"NotSpecified",
")",
":",
"from",
".",
"statistical",
"import",
"RollingSpearman",
"return",
"RollingSpearman",
"(",
"base_factor",
"=",
"self",
",",
"target",
"=",
"target",
",",
"correlation_length",
"=",
"correlation_length",
",",
"mask",
"=",
"mask",
",",
")"
] | Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr` | [
"Construct",
"a",
"new",
"Factor",
"that",
"computes",
"rolling",
"spearman",
"rank",
"correlation",
"coefficients",
"between",
"target",
"and",
"the",
"columns",
"of",
"self",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L721-L781 |
25,915 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.linear_regression | def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
) | python | def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
) | [
"def",
"linear_regression",
"(",
"self",
",",
"target",
",",
"regression_length",
",",
"mask",
"=",
"NotSpecified",
")",
":",
"from",
".",
"statistical",
"import",
"RollingLinearRegression",
"return",
"RollingLinearRegression",
"(",
"dependent",
"=",
"self",
",",
"independent",
"=",
"target",
",",
"regression_length",
"=",
"regression_length",
",",
"mask",
"=",
"mask",
",",
")"
] | Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns` | [
"Construct",
"a",
"new",
"Factor",
"that",
"performs",
"an",
"ordinary",
"least",
"-",
"squares",
"regression",
"predicting",
"the",
"columns",
"of",
"self",
"from",
"target",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L786-L843 |
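In the same spirit, one window of the regression above can be sketched with ``scipy.stats.linregress``, which the See Also section points to. The synthetic data and the 0.5 coefficient are purely illustrative.

.. code-block:: python

    import numpy as np
    from scipy.stats import linregress

    rng = np.random.default_rng(1)
    window = 30

    independent = rng.normal(size=window)   # the target/predictor
    dependent = 0.5 * independent + rng.normal(size=window)

    # One ordinary least-squares fit per window; the slope is the
    # regression coefficient of dependent on independent.
    result = linregress(independent, dependent)
    print(result.slope, result.intercept, result.rvalue)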
25,916 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.winsorize | def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately to each
group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with the columns defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
) | python | def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately to each
group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with the columns defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
) | [
"def",
"winsorize",
"(",
"self",
",",
"min_percentile",
",",
"max_percentile",
",",
"mask",
"=",
"NotSpecified",
",",
"groupby",
"=",
"NotSpecified",
")",
":",
"if",
"not",
"0.0",
"<=",
"min_percentile",
"<",
"max_percentile",
"<=",
"1.0",
":",
"raise",
"BadPercentileBounds",
"(",
"min_percentile",
"=",
"min_percentile",
",",
"max_percentile",
"=",
"max_percentile",
",",
"upper_bound",
"=",
"1.0",
",",
")",
"return",
"GroupedRowTransform",
"(",
"transform",
"=",
"winsorize",
",",
"transform_args",
"=",
"(",
"min_percentile",
",",
"max_percentile",
")",
",",
"factor",
"=",
"self",
",",
"groupby",
"=",
"groupby",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"mask",
"=",
"mask",
",",
"window_safe",
"=",
"self",
".",
"window_safe",
",",
")"
] | Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately to each
group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with the columns defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby` | [
"Construct",
"a",
"new",
"factor",
"that",
"winsorizes",
"the",
"result",
"of",
"this",
"factor",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L852-L947 |
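The WINSOR_1 column of the table above can be reproduced with ``scipy.stats.mstats.winsorize``, noting the mapping between the (min_percentile, max_percentile) pair and scipy's ``limits``, which give the fraction clipped from each tail.

.. code-block:: python

    import numpy as np
    from scipy.stats.mstats import winsorize

    prices = np.array([1., 2., 3., 4., 5., 6.])

    # min_percentile=0.25, max_percentile=0.75 clips the bottom 25%
    # and the top 25%, i.e. scipy's limits=(0.25, 0.25).
    winsor_1 = winsorize(prices, limits=(0.25, 0.25))
    print(winsor_1)  # [2. 2. 3. 4. 5. 5.]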
25,917 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.quantiles | def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1).
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask) | python | def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1).
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask) | [
"def",
"quantiles",
"(",
"self",
",",
"bins",
",",
"mask",
"=",
"NotSpecified",
")",
":",
"if",
"mask",
"is",
"NotSpecified",
":",
"mask",
"=",
"self",
".",
"mask",
"return",
"Quantiles",
"(",
"inputs",
"=",
"(",
"self",
",",
")",
",",
"bins",
"=",
"bins",
",",
"mask",
"=",
"mask",
")"
] | Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1). | [
"Construct",
"a",
"Classifier",
"computing",
"quantiles",
"of",
"the",
"output",
"of",
"self",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L950-L974 |
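A rough stand-in for the labelling scheme above, using ``pandas.qcut``: integer labels run from 0 to bins - 1 and NaNs map to the -1 sentinel. This only illustrates the semantics; the Quantiles classifier bins each row of pipeline output separately and may differ in edge cases.

.. code-block:: python

    import numpy as np
    import pandas as pd

    values = pd.Series([1.0, 2.0, 3.0, 4.0, np.nan, 6.0])

    # qcut leaves NaN as NaN; map it to the -1 label described above.
    labels = pd.qcut(values, q=3, labels=False)
    labels = labels.fillna(-1).astype(int)
    print(labels.tolist())  # [0, 0, 1, 2, -1, 2]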
25,918 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.top | def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
if N == 1:
# Special case: if N == 1, we can avoid doing a full sort on every
# group, which is a big win.
return self._maximum(mask=mask, groupby=groupby)
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N | python | def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
if N == 1:
# Special case: if N == 1, we can avoid doing a full sort on every
# group, which is a big win.
return self._maximum(mask=mask, groupby=groupby)
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N | [
"def",
"top",
"(",
"self",
",",
"N",
",",
"mask",
"=",
"NotSpecified",
",",
"groupby",
"=",
"NotSpecified",
")",
":",
"if",
"N",
"==",
"1",
":",
"# Special case: if N == 1, we can avoid doing a full sort on every",
"# group, which is a big win.",
"return",
"self",
".",
"_maximum",
"(",
"mask",
"=",
"mask",
",",
"groupby",
"=",
"groupby",
")",
"return",
"self",
".",
"rank",
"(",
"ascending",
"=",
"False",
",",
"mask",
"=",
"mask",
",",
"groupby",
"=",
"groupby",
")",
"<=",
"N"
] | Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.filters.Filter | [
"Construct",
"a",
"Filter",
"matching",
"the",
"top",
"N",
"asset",
"values",
"of",
"self",
"each",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1048-L1074 |
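A minimal usage sketch of the equivalence implemented above, assuming the standard ``Returns`` factor and ``Pipeline`` constructor; both filters select the same assets each day.

.. code-block:: python

    from zipline.pipeline import Pipeline
    from zipline.pipeline.factors import Returns

    returns = Returns(window_length=10)

    # top(N) is sugar for a descending rank comparison.
    biggest_5 = returns.top(5)
    also_biggest_5 = returns.rank(ascending=False) <= 5

    pipe = Pipeline(columns={'top_5': biggest_5})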
25,919 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.bottom | def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N | python | def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N | [
"def",
"bottom",
"(",
"self",
",",
"N",
",",
"mask",
"=",
"NotSpecified",
",",
"groupby",
"=",
"NotSpecified",
")",
":",
"return",
"self",
".",
"rank",
"(",
"ascending",
"=",
"True",
",",
"mask",
"=",
"mask",
",",
"groupby",
"=",
"groupby",
")",
"<=",
"N"
] | Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter | [
"Construct",
"a",
"Filter",
"matching",
"the",
"bottom",
"N",
"asset",
"values",
"of",
"self",
"each",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1076-L1098 |
25,920 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.percentile_between | def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating
percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
) | python | def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating
percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
) | [
"def",
"percentile_between",
"(",
"self",
",",
"min_percentile",
",",
"max_percentile",
",",
"mask",
"=",
"NotSpecified",
")",
":",
"return",
"PercentileFilter",
"(",
"self",
",",
"min_percentile",
"=",
"min_percentile",
",",
"max_percentile",
"=",
"max_percentile",
",",
"mask",
"=",
"mask",
",",
")"
] | Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating
percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter | [
"Construct",
"a",
"new",
"Filter",
"representing",
"entries",
"from",
"the",
"output",
"of",
"this",
"Factor",
"that",
"fall",
"within",
"the",
"percentile",
"range",
"defined",
"by",
"min_percentile",
"and",
"max_percentile",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1103-L1139 |
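A usage sketch, again assuming the standard ``Returns`` factor; the bounds use the 0.0-100.0 scale documented above.

.. code-block:: python

    from zipline.pipeline import Pipeline
    from zipline.pipeline.factors import Returns

    returns = Returns(window_length=10)

    # Keep assets whose 10-day returns fall in the middle half of
    # the cross-section each day.
    middle_half = returns.percentile_between(25.0, 75.0)

    pipe = Pipeline(columns={'middle': middle_half})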
25,921 | quantopian/zipline | zipline/pipeline/factors/factor.py | Rank._validate | def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate() | python | def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate() | [
"def",
"_validate",
"(",
"self",
")",
":",
"if",
"self",
".",
"_method",
"not",
"in",
"_RANK_METHODS",
":",
"raise",
"UnknownRankMethod",
"(",
"method",
"=",
"self",
".",
"_method",
",",
"choices",
"=",
"set",
"(",
"_RANK_METHODS",
")",
",",
")",
"return",
"super",
"(",
"Rank",
",",
"self",
")",
".",
"_validate",
"(",
")"
] | Verify that the stored rank method is valid. | [
"Verify",
"that",
"the",
"stored",
"rank",
"method",
"is",
"valid",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1382-L1391 |
25,922 | quantopian/zipline | zipline/pipeline/factors/factor.py | Rank._compute | def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
) | python | def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
) | [
"def",
"_compute",
"(",
"self",
",",
"arrays",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"return",
"masked_rankdata_2d",
"(",
"arrays",
"[",
"0",
"]",
",",
"mask",
",",
"self",
".",
"inputs",
"[",
"0",
"]",
".",
"missing_value",
",",
"self",
".",
"_method",
",",
"self",
".",
"_ascending",
",",
")"
] | For each row in the input, compute a like-shaped array of per-row
ranks. | [
"For",
"each",
"row",
"in",
"the",
"input",
"compute",
"a",
"like",
"-",
"shaped",
"array",
"of",
"per",
"-",
"row",
"ranks",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1393-L1404 |
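A rough approximation of the masked, row-wise ranking that ``masked_rankdata_2d`` performs. Pushing masked entries to the end of the ordering and then reporting them as NaN is an assumption made for illustration; the real helper's handling of missing values may differ.

.. code-block:: python

    import numpy as np
    from scipy.stats import rankdata

    data = np.array([[3.0, 1.0, 2.0],
                     [5.0, 4.0, 6.0]])
    mask = np.array([[True, True, False],
                     [True, True, True]])

    # Masked entries sort last, then come back as NaN.
    filled = np.where(mask, data, np.inf)
    ranks = np.apply_along_axis(
        rankdata, 1, filled, method='ordinal'
    ).astype(float)
    ranks[~mask] = np.nan
    print(ranks)
    # [[ 2.  1. nan]
    #  [ 2.  1.  3.]]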
25,923 | quantopian/zipline | zipline/utils/pandas_utils.py | find_in_sorted_index | def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
LookupError
If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
if ix == len(dts) or dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix | python | def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
LookupError
If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
if ix == len(dts) or dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix | [
"def",
"find_in_sorted_index",
"(",
"dts",
",",
"dt",
")",
":",
"ix",
"=",
"dts",
".",
"searchsorted",
"(",
"dt",
")",
"if",
"ix",
"==",
"len",
"(",
"dts",
")",
"or",
"dts",
"[",
"ix",
"]",
"!=",
"dt",
":",
"raise",
"LookupError",
"(",
"\"{dt} is not in {dts}\"",
".",
"format",
"(",
"dt",
"=",
"dt",
",",
"dts",
"=",
"dts",
")",
")",
"return",
"ix"
] | Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
LookupError
If dt is not in ``dts``. | [
"Find",
"the",
"index",
"of",
"dt",
"in",
"dts",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L114-L142 |
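A small runnable illustration of the lookup above; the dates are arbitrary.

.. code-block:: python

    import pandas as pd

    dts = pd.date_range('2024-01-02', periods=5, freq='D')

    # A present timestamp: searchsorted finds its exact position.
    ix = dts.searchsorted(pd.Timestamp('2024-01-04'))
    print(ix, dts[ix])  # 2 2024-01-04 00:00:00

    # A missing timestamp lands at an insertion point whose value
    # differs, which is exactly the condition the function raises on.
    miss = dts.searchsorted(pd.Timestamp('2024-01-04 12:00'))
    print(dts[miss] != pd.Timestamp('2024-01-04 12:00'))  # True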
25,924 | quantopian/zipline | zipline/utils/pandas_utils.py | nearest_unequal_elements | def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value | python | def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value | [
"def",
"nearest_unequal_elements",
"(",
"dts",
",",
"dt",
")",
":",
"if",
"not",
"dts",
".",
"is_unique",
":",
"raise",
"ValueError",
"(",
"\"dts must be unique\"",
")",
"if",
"not",
"dts",
".",
"is_monotonic_increasing",
":",
"raise",
"ValueError",
"(",
"\"dts must be sorted in increasing order\"",
")",
"if",
"not",
"len",
"(",
"dts",
")",
":",
"return",
"None",
",",
"None",
"sortpos",
"=",
"dts",
".",
"searchsorted",
"(",
"dt",
",",
"side",
"=",
"'left'",
")",
"try",
":",
"sortval",
"=",
"dts",
"[",
"sortpos",
"]",
"except",
"IndexError",
":",
"# dt is greater than any value in the array.",
"return",
"dts",
"[",
"-",
"1",
"]",
",",
"None",
"if",
"dt",
"<",
"sortval",
":",
"lower_ix",
"=",
"sortpos",
"-",
"1",
"upper_ix",
"=",
"sortpos",
"elif",
"dt",
"==",
"sortval",
":",
"lower_ix",
"=",
"sortpos",
"-",
"1",
"upper_ix",
"=",
"sortpos",
"+",
"1",
"else",
":",
"lower_ix",
"=",
"sortpos",
"upper_ix",
"=",
"sortpos",
"+",
"1",
"lower_value",
"=",
"dts",
"[",
"lower_ix",
"]",
"if",
"lower_ix",
">=",
"0",
"else",
"None",
"upper_value",
"=",
"dts",
"[",
"upper_ix",
"]",
"if",
"upper_ix",
"<",
"len",
"(",
"dts",
")",
"else",
"None",
"return",
"lower_value",
",",
"upper_value"
] | Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds. | [
"Find",
"values",
"in",
"dts",
"closest",
"but",
"not",
"equal",
"to",
"dt",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L145-L192 |
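The two main branches above, traced on arbitrary dates:

.. code-block:: python

    import pandas as pd

    dts = pd.DatetimeIndex(['2024-01-02', '2024-01-04', '2024-01-08'])

    # dt strictly between two elements: the neighbours straddle it.
    dt = pd.Timestamp('2024-01-05')
    pos = dts.searchsorted(dt, side='left')  # 2
    print(dts[pos - 1], dts[pos])            # 2024-01-04, 2024-01-08

    # dt equal to an element: the equal element itself is skipped.
    dt = pd.Timestamp('2024-01-04')
    pos = dts.searchsorted(dt, side='left')  # 1
    print(dts[pos - 1], dts[pos + 1])        # 2024-01-02, 2024-01-08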
25,925 | quantopian/zipline | zipline/utils/pandas_utils.py | categorical_df_concat | def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : pd.DataFrame
DataFrame of the concatenated list.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list) | python | def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : pd.DataFrame
DataFrame of the concatenated list.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list) | [
"def",
"categorical_df_concat",
"(",
"df_list",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"not",
"inplace",
":",
"df_list",
"=",
"deepcopy",
"(",
"df_list",
")",
"# Assert each dataframe has the same columns/dtypes",
"df",
"=",
"df_list",
"[",
"0",
"]",
"if",
"not",
"all",
"(",
"[",
"(",
"df",
".",
"dtypes",
".",
"equals",
"(",
"df_i",
".",
"dtypes",
")",
")",
"for",
"df_i",
"in",
"df_list",
"[",
"1",
":",
"]",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Input DataFrames must have the same columns/dtypes.\"",
")",
"categorical_columns",
"=",
"df",
".",
"columns",
"[",
"df",
".",
"dtypes",
"==",
"'category'",
"]",
"for",
"col",
"in",
"categorical_columns",
":",
"new_categories",
"=",
"sorted",
"(",
"set",
"(",
")",
".",
"union",
"(",
"*",
"(",
"frame",
"[",
"col",
"]",
".",
"cat",
".",
"categories",
"for",
"frame",
"in",
"df_list",
")",
")",
")",
"with",
"ignore_pandas_nan_categorical_warning",
"(",
")",
":",
"for",
"df",
"in",
"df_list",
":",
"df",
"[",
"col",
"]",
".",
"cat",
".",
"set_categories",
"(",
"new_categories",
",",
"inplace",
"=",
"True",
")",
"return",
"pd",
".",
"concat",
"(",
"df_list",
")"
] | Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
Dataframe of concatenated list. | [
"Prepare",
"list",
"of",
"pandas",
"DataFrames",
"to",
"be",
"used",
"as",
"input",
"to",
"pd",
".",
"concat",
".",
"Ensure",
"any",
"columns",
"of",
"type",
"category",
"have",
"the",
"same",
"categories",
"across",
"each",
"dataframe",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L247-L287 |
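The category-unification step above, written out for two frames. The assignment form of ``set_categories`` is used here because the ``inplace`` keyword was removed in recent pandas releases.

.. code-block:: python

    import pandas as pd

    a = pd.DataFrame({'sector': pd.Categorical(['tech', 'energy'])})
    b = pd.DataFrame({'sector': pd.Categorical(['tech', 'retail'])})

    # Unify the category sets first so the concatenated column stays
    # categorical instead of falling back to object dtype.
    union = sorted(
        set(a['sector'].cat.categories) | set(b['sector'].cat.categories)
    )
    a['sector'] = a['sector'].cat.set_categories(union)
    b['sector'] = b['sector'].cat.set_categories(union)

    out = pd.concat([a, b], ignore_index=True)
    print(out['sector'].dtype)  # category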
25,926 | quantopian/zipline | zipline/utils/pandas_utils.py | check_indexes_all_same | def check_indexes_all_same(indexes, message="Indexes are not equal."):
"""Check that a list of Index objects are all equal.
Parameters
----------
indexes : iterable[pd.Index]
Iterable of indexes to check.
Raises
------
ValueError
If the indexes are not all the same.
"""
iterator = iter(indexes)
first = next(iterator)
for other in iterator:
same = (first == other)
if not same.all():
bad_loc = np.flatnonzero(~same)[0]
raise ValueError(
"{}\nFirst difference is at index {}: "
"{} != {}".format(
message, bad_loc, first[bad_loc], other[bad_loc]
),
) | python | def check_indexes_all_same(indexes, message="Indexes are not equal."):
"""Check that a list of Index objects are all equal.
Parameters
----------
indexes : iterable[pd.Index]
Iterable of indexes to check.
Raises
------
ValueError
If the indexes are not all the same.
"""
iterator = iter(indexes)
first = next(iterator)
for other in iterator:
same = (first == other)
if not same.all():
bad_loc = np.flatnonzero(~same)[0]
raise ValueError(
"{}\nFirst difference is at index {}: "
"{} != {}".format(
message, bad_loc, first[bad_loc], other[bad_loc]
),
) | [
"def",
"check_indexes_all_same",
"(",
"indexes",
",",
"message",
"=",
"\"Indexes are not equal.\"",
")",
":",
"iterator",
"=",
"iter",
"(",
"indexes",
")",
"first",
"=",
"next",
"(",
"iterator",
")",
"for",
"other",
"in",
"iterator",
":",
"same",
"=",
"(",
"first",
"==",
"other",
")",
"if",
"not",
"same",
".",
"all",
"(",
")",
":",
"bad_loc",
"=",
"np",
".",
"flatnonzero",
"(",
"~",
"same",
")",
"[",
"0",
"]",
"raise",
"ValueError",
"(",
"\"{}\\nFirst difference is at index {}: \"",
"\"{} != {}\"",
".",
"format",
"(",
"message",
",",
"bad_loc",
",",
"first",
"[",
"bad_loc",
"]",
",",
"other",
"[",
"bad_loc",
"]",
")",
",",
")"
] | Check that a list of Index objects are all equal.
Parameters
----------
indexes : iterable[pd.Index]
Iterable of indexes to check.
Raises
------
ValueError
If the indexes are not all the same. | [
"Check",
"that",
"a",
"list",
"of",
"Index",
"objects",
"are",
"all",
"equal",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L325-L349 |
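The mismatch-reporting logic above, traced on a small pair of indexes:

.. code-block:: python

    import numpy as np
    import pandas as pd

    first = pd.Index([1, 2, 3])
    other = pd.Index([1, 9, 3])

    # Elementwise comparison; the first False position is what the
    # error message reports.
    same = (first == other)
    if not same.all():
        bad_loc = np.flatnonzero(~same)[0]
        print(bad_loc, first[bad_loc], other[bad_loc])  # 1 2 9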
25,927 | quantopian/zipline | zipline/pipeline/loaders/events.py | required_event_fields | def required_event_fields(next_value_columns, previous_value_columns):
"""
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``.
"""
# These metadata columns are used to align event indexers.
return {
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
}.union(
# We also expect any of the field names that our loadable columns
# are mapped to.
viewvalues(next_value_columns),
viewvalues(previous_value_columns),
) | python | def required_event_fields(next_value_columns, previous_value_columns):
"""
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``.
"""
# These metadata columns are used to align event indexers.
return {
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
}.union(
# We also expect any of the field names that our loadable columns
# are mapped to.
viewvalues(next_value_columns),
viewvalues(previous_value_columns),
) | [
"def",
"required_event_fields",
"(",
"next_value_columns",
",",
"previous_value_columns",
")",
":",
"# These metadata columns are used to align event indexers.",
"return",
"{",
"TS_FIELD_NAME",
",",
"SID_FIELD_NAME",
",",
"EVENT_DATE_FIELD_NAME",
",",
"}",
".",
"union",
"(",
"# We also expect any of the field names that our loadable columns",
"# are mapped to.",
"viewvalues",
"(",
"next_value_columns",
")",
",",
"viewvalues",
"(",
"previous_value_columns",
")",
",",
")"
] | Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``. | [
"Compute",
"the",
"set",
"of",
"resource",
"columns",
"required",
"to",
"serve",
"next_value_columns",
"and",
"previous_value_columns",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/events.py#L21-L36 |
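For illustration, a hedged sketch of calling required_event_fields with hypothetical column mappings (string keys stand in for BoundColumns, since only the dict values are inspected; the literal metadata field names come from the TS/SID/EVENT_DATE constants and are not asserted here):

from zipline.pipeline.loaders.events import required_event_fields

next_value_columns = {'next_pay_date_col': 'pay_date'}      # hypothetical
previous_value_columns = {'prev_pay_date_col': 'pay_date'}  # hypothetical

fields = required_event_fields(next_value_columns, previous_value_columns)
# fields == the three metadata field names plus {'pay_date'}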
25,928 | quantopian/zipline | zipline/pipeline/loaders/events.py | validate_column_specs | def validate_column_specs(events, next_value_columns, previous_value_columns):
"""
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``.
"""
required = required_event_fields(next_value_columns,
previous_value_columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EventsLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) | python | def validate_column_specs(events, next_value_columns, previous_value_columns):
"""
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``.
"""
required = required_event_fields(next_value_columns,
previous_value_columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EventsLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) | [
"def",
"validate_column_specs",
"(",
"events",
",",
"next_value_columns",
",",
"previous_value_columns",
")",
":",
"required",
"=",
"required_event_fields",
"(",
"next_value_columns",
",",
"previous_value_columns",
")",
"received",
"=",
"set",
"(",
"events",
".",
"columns",
")",
"missing",
"=",
"required",
"-",
"received",
"if",
"missing",
":",
"raise",
"ValueError",
"(",
"\"EventsLoader missing required columns {missing}.\\n\"",
"\"Got Columns: {received}\\n\"",
"\"Expected Columns: {required}\"",
".",
"format",
"(",
"missing",
"=",
"sorted",
"(",
"missing",
")",
",",
"received",
"=",
"sorted",
"(",
"received",
")",
",",
"required",
"=",
"sorted",
"(",
"required",
")",
",",
")",
")"
] | Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``. | [
"Verify",
"that",
"the",
"columns",
"of",
"events",
"can",
"be",
"used",
"by",
"an",
"EventsLoader",
"to",
"serve",
"the",
"BoundColumns",
"described",
"by",
"next_value_columns",
"and",
"previous_value_columns",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/events.py#L39-L58 |
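And a sketch of validate_column_specs rejecting a frame that lacks required columns (column names are hypothetical except for the mapped 'pay_date'):

import pandas as pd
from zipline.pipeline.loaders.events import validate_column_specs

events = pd.DataFrame(columns=['sid', 'pay_date'])  # missing event metadata
try:
    validate_column_specs(
        events,
        next_value_columns={'next_pay_date_col': 'pay_date'},
        previous_value_columns={},
    )
except ValueError as err:
    print(err)  # names the missing, received, and expected columns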
25,929 | quantopian/zipline | zipline/pipeline/loaders/events.py | EventsLoader.split_next_and_previous_event_columns | def split_next_and_previous_event_columns(self, requested_columns):
"""
Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
``requested_columns``, partitioned into sub-sequences based on
whether the column should produce values from the next event or the
previous event.
"""
def next_or_previous(c):
if c in self.next_value_columns:
return 'next'
elif c in self.previous_value_columns:
return 'previous'
raise ValueError(
"{c} not found in next_value_columns "
"or previous_value_columns".format(c=c)
)
groups = groupby(next_or_previous, requested_columns)
return groups.get('next', ()), groups.get('previous', ()) | python | def split_next_and_previous_event_columns(self, requested_columns):
"""
Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
``requested_columns``, partitioned into sub-sequences based on
whether the column should produce values from the next event or the
previous event.
"""
def next_or_previous(c):
if c in self.next_value_columns:
return 'next'
elif c in self.previous_value_columns:
return 'previous'
raise ValueError(
"{c} not found in next_value_columns "
"or previous_value_columns".format(c=c)
)
groups = groupby(next_or_previous, requested_columns)
return groups.get('next', ()), groups.get('previous', ()) | [
"def",
"split_next_and_previous_event_columns",
"(",
"self",
",",
"requested_columns",
")",
":",
"def",
"next_or_previous",
"(",
"c",
")",
":",
"if",
"c",
"in",
"self",
".",
"next_value_columns",
":",
"return",
"'next'",
"elif",
"c",
"in",
"self",
".",
"previous_value_columns",
":",
"return",
"'previous'",
"raise",
"ValueError",
"(",
"\"{c} not found in next_value_columns \"",
"\"or previous_value_columns\"",
".",
"format",
"(",
"c",
"=",
"c",
")",
")",
"groups",
"=",
"groupby",
"(",
"next_or_previous",
",",
"requested_columns",
")",
"return",
"groups",
".",
"get",
"(",
"'next'",
",",
"(",
")",
")",
",",
"groups",
".",
"get",
"(",
"'previous'",
",",
"(",
")",
")"
] | Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
``requested_columns``, partitioned into sub-sequences based on
whether the column should produce values from the next event or the
previous event. | [
"Split",
"requested",
"columns",
"into",
"columns",
"that",
"should",
"load",
"the",
"next",
"known",
"value",
"and",
"columns",
"that",
"should",
"load",
"the",
"previous",
"known",
"value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/events.py#L119-L146 |
25,930 | quantopian/zipline | zipline/lib/labelarray.py | compare_arrays | def compare_arrays(left, right):
"Eq check with a short-circuit for identical objects."
return (
left is right
or ((left.shape == right.shape) and (left == right).all())
) | python | def compare_arrays(left, right):
"Eq check with a short-circuit for identical objects."
return (
left is right
or ((left.shape == right.shape) and (left == right).all())
) | [
"def",
"compare_arrays",
"(",
"left",
",",
"right",
")",
":",
"return",
"(",
"left",
"is",
"right",
"or",
"(",
"(",
"left",
".",
"shape",
"==",
"right",
".",
"shape",
")",
"and",
"(",
"left",
"==",
"right",
")",
".",
"all",
"(",
")",
")",
")"
] | Eq check with a short-circuit for identical objects. | [
"Eq",
"check",
"with",
"a",
"short",
"-",
"circuit",
"for",
"identical",
"objects",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L38-L43 |
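A quick sketch of the short-circuit behavior (compare_arrays lives in zipline/lib/labelarray.py per this row's path):

import numpy as np
from zipline.lib.labelarray import compare_arrays

left = np.array([1.0, 2.0])
compare_arrays(left, left)                  # True: identity short-circuit
compare_arrays(left, np.array([1.0, 2.0]))  # True: same shape, equal values
compare_arrays(left, np.array([1.0]))       # False: shape mismatch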
25,931 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.from_codes_and_metadata | def from_codes_and_metadata(cls,
codes,
categories,
reverse_categories,
missing_value):
"""
Rehydrate a LabelArray from the codes and metadata.
Parameters
----------
codes : np.ndarray[integral]
The codes for the label array.
categories : np.ndarray[object]
The unique string categories.
reverse_categories : dict[str, int]
The mapping from category to its code-index.
missing_value : any
The value used to represent missing data.
"""
ret = codes.view(type=cls, dtype=np.void)
ret._categories = categories
ret._reverse_categories = reverse_categories
ret._missing_value = missing_value
return ret | python | def from_codes_and_metadata(cls,
codes,
categories,
reverse_categories,
missing_value):
"""
Rehydrate a LabelArray from the codes and metadata.
Parameters
----------
codes : np.ndarray[integral]
The codes for the label array.
categories : np.ndarray[object]
The unique string categories.
reverse_categories : dict[str, int]
The mapping from category to its code-index.
missing_value : any
The value used to represent missing data.
"""
ret = codes.view(type=cls, dtype=np.void)
ret._categories = categories
ret._reverse_categories = reverse_categories
ret._missing_value = missing_value
return ret | [
"def",
"from_codes_and_metadata",
"(",
"cls",
",",
"codes",
",",
"categories",
",",
"reverse_categories",
",",
"missing_value",
")",
":",
"ret",
"=",
"codes",
".",
"view",
"(",
"type",
"=",
"cls",
",",
"dtype",
"=",
"np",
".",
"void",
")",
"ret",
".",
"_categories",
"=",
"categories",
"ret",
".",
"_reverse_categories",
"=",
"reverse_categories",
"ret",
".",
"_missing_value",
"=",
"missing_value",
"return",
"ret"
] | Rehydrate a LabelArray from the codes and metadata.
Parameters
----------
codes : np.ndarray[integral]
The codes for the label array.
categories : np.ndarray[object]
The unique string categories.
reverse_categories : dict[str, int]
The mapping from category to its code-index.
missing_value : any
The value used to represent missing data. | [
"Rehydrate",
"a",
"LabelArray",
"from",
"the",
"codes",
"and",
"metadata",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L194-L217 |
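A hedged round-trip sketch: build a LabelArray the ordinary way, pull out its codes and metadata, then rehydrate it with the classmethod above (assumes the LabelArray(values, missing_value) constructor and the categories/reverse_categories properties referenced elsewhere in this file):

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['a', 'b', None, 'a'], dtype=object),
                 missing_value=None)
rebuilt = LabelArray.from_codes_and_metadata(
    codes=arr.as_int_array(),
    categories=arr.categories,
    reverse_categories=arr.reverse_categories,
    missing_value=None,
)
# rebuilt is a view over the same code buffer; no data is copied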
25,932 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.as_int_array | def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
) | python | def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
) | [
"def",
"as_int_array",
"(",
"self",
")",
":",
"return",
"self",
".",
"view",
"(",
"type",
"=",
"ndarray",
",",
"dtype",
"=",
"unsigned_int_dtype_with_size_in_bytes",
"(",
"self",
".",
"itemsize",
")",
",",
")"
] | Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data. | [
"Convert",
"self",
"into",
"a",
"regular",
"ndarray",
"of",
"ints",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L303-L312 |
25,933 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.as_categorical | def as_categorical(self):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
"""
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
with ignore_pandas_nan_categorical_warning():
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
) | python | def as_categorical(self):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
"""
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
with ignore_pandas_nan_categorical_warning():
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
) | [
"def",
"as_categorical",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"shape",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Can't convert a 2D array to a categorical.\"",
")",
"with",
"ignore_pandas_nan_categorical_warning",
"(",
")",
":",
"return",
"pd",
".",
"Categorical",
".",
"from_codes",
"(",
"self",
".",
"as_int_array",
"(",
")",
",",
"# We need to make a copy because pandas >= 0.17 fails if this",
"# buffer isn't writeable.",
"self",
".",
"categories",
".",
"copy",
"(",
")",
",",
"ordered",
"=",
"False",
",",
")"
] | Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports. | [
"Coerce",
"self",
"into",
"a",
"pandas",
"categorical",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L322-L338 |
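A one-dimensional example (two-dimensional inputs raise ValueError per the guard above):

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['low', 'high', 'low'], dtype=object),
                 missing_value=None)
cat = arr.as_categorical()  # a pandas.Categorical over arr's categories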
25,934 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.as_categorical_frame | def as_categorical_frame(self, index, columns, name=None):
"""
Coerce self into a pandas DataFrame of Categoricals.
"""
if len(self.shape) != 2:
raise ValueError(
"Can't convert a non-2D LabelArray into a DataFrame."
)
expected_shape = (len(index), len(columns))
if expected_shape != self.shape:
raise ValueError(
"Can't construct a DataFrame with provided indices:\n\n"
"LabelArray shape is {actual}, but index and columns imply "
"that shape should be {expected}.".format(
actual=self.shape,
expected=expected_shape,
)
)
return pd.Series(
index=pd.MultiIndex.from_product([index, columns]),
data=self.ravel().as_categorical(),
name=name,
).unstack() | python | def as_categorical_frame(self, index, columns, name=None):
"""
Coerce self into a pandas DataFrame of Categoricals.
"""
if len(self.shape) != 2:
raise ValueError(
"Can't convert a non-2D LabelArray into a DataFrame."
)
expected_shape = (len(index), len(columns))
if expected_shape != self.shape:
raise ValueError(
"Can't construct a DataFrame with provided indices:\n\n"
"LabelArray shape is {actual}, but index and columns imply "
"that shape should be {expected}.".format(
actual=self.shape,
expected=expected_shape,
)
)
return pd.Series(
index=pd.MultiIndex.from_product([index, columns]),
data=self.ravel().as_categorical(),
name=name,
).unstack() | [
"def",
"as_categorical_frame",
"(",
"self",
",",
"index",
",",
"columns",
",",
"name",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"Can't convert a non-2D LabelArray into a DataFrame.\"",
")",
"expected_shape",
"=",
"(",
"len",
"(",
"index",
")",
",",
"len",
"(",
"columns",
")",
")",
"if",
"expected_shape",
"!=",
"self",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"Can't construct a DataFrame with provided indices:\\n\\n\"",
"\"LabelArray shape is {actual}, but index and columns imply \"",
"\"that shape should be {expected}.\"",
".",
"format",
"(",
"actual",
"=",
"self",
".",
"shape",
",",
"expected",
"=",
"expected_shape",
",",
")",
")",
"return",
"pd",
".",
"Series",
"(",
"index",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"[",
"index",
",",
"columns",
"]",
")",
",",
"data",
"=",
"self",
".",
"ravel",
"(",
")",
".",
"as_categorical",
"(",
")",
",",
"name",
"=",
"name",
",",
")",
".",
"unstack",
"(",
")"
] | Coerce self into a pandas DataFrame of Categoricals. | [
"Coerce",
"self",
"into",
"a",
"pandas",
"DataFrame",
"of",
"Categoricals",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L340-L364 |
25,935 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.set_scalar | def set_scalar(self, indexer, value):
"""
Set scalar value into the array.
Parameters
----------
indexer : any
The indexer to set the value at.
value : str
The value to assign at the given locations.
Raises
------
ValueError
Raised when ``value`` is not a valid element of this label
array.
"""
try:
value_code = self.reverse_categories[value]
except KeyError:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code | python | def set_scalar(self, indexer, value):
"""
Set scalar value into the array.
Parameters
----------
indexer : any
The indexer to set the value at.
value : str
The value to assign at the given locations.
Raises
------
ValueError
Raised when ``value`` is not a valid element of this label
array.
"""
try:
value_code = self.reverse_categories[value]
except KeyError:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code | [
"def",
"set_scalar",
"(",
"self",
",",
"indexer",
",",
"value",
")",
":",
"try",
":",
"value_code",
"=",
"self",
".",
"reverse_categories",
"[",
"value",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"%r is not in LabelArray categories.\"",
"%",
"value",
")",
"self",
".",
"as_int_array",
"(",
")",
"[",
"indexer",
"]",
"=",
"value_code"
] | Set scalar value into the array.
Parameters
----------
indexer : any
The indexer to set the value at.
value : str
The value to assign at the given locations.
Raises
------
ValueError
Raised when ``value`` is not a valid element of this label
array. | [
"Set",
"scalar",
"value",
"into",
"the",
"array",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L400-L422 |
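A sketch of the in-place assignment and its failure mode:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['a', 'b', 'a'], dtype=object), missing_value=None)
arr.set_scalar(0, 'b')      # fine: 'b' is an existing category
try:
    arr.set_scalar(1, 'z')  # 'z' was never a category
except ValueError as err:
    print(err)              # "'z' is not in LabelArray categories."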
25,936 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.empty_like | def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
"""
return type(self).from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
) | python | def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
"""
return type(self).from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
) | [
"def",
"empty_like",
"(",
"self",
",",
"shape",
")",
":",
"return",
"type",
"(",
"self",
")",
".",
"from_codes_and_metadata",
"(",
"codes",
"=",
"np",
".",
"full",
"(",
"shape",
",",
"self",
".",
"reverse_categories",
"[",
"self",
".",
"missing_value",
"]",
",",
"dtype",
"=",
"unsigned_int_dtype_with_size_in_bytes",
"(",
"self",
".",
"itemsize",
")",
",",
")",
",",
"categories",
"=",
"self",
".",
"categories",
",",
"reverse_categories",
"=",
"self",
".",
"reverse_categories",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
")"
] | Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``. | [
"Make",
"an",
"empty",
"LabelArray",
"with",
"the",
"same",
"categories",
"as",
"self",
"filled",
"with",
"self",
".",
"missing_value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L605-L619 |
25,937 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.map_predicate | def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
"""
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
def f_to_use(x):
return False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
# unpack the results form each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()] | python | def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
"""
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
def f_to_use(x):
return False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
# unpack the results from each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()] | [
"def",
"map_predicate",
"(",
"self",
",",
"f",
")",
":",
"# Functions passed to this are of type str -> bool. Don't ever call",
"# them on None, which is the only non-str value we ever store in",
"# categories.",
"if",
"self",
".",
"missing_value",
"is",
"None",
":",
"def",
"f_to_use",
"(",
"x",
")",
":",
"return",
"False",
"if",
"x",
"is",
"None",
"else",
"f",
"(",
"x",
")",
"else",
":",
"f_to_use",
"=",
"f",
"# Call f on each unique value in our categories.",
"results",
"=",
"np",
".",
"vectorize",
"(",
"f_to_use",
",",
"otypes",
"=",
"[",
"bool_dtype",
"]",
")",
"(",
"self",
".",
"categories",
")",
"# missing_value should produce False no matter what",
"results",
"[",
"self",
".",
"reverse_categories",
"[",
"self",
".",
"missing_value",
"]",
"]",
"=",
"False",
"# unpack the results form each unique value into their corresponding",
"# locations in our indices.",
"return",
"results",
"[",
"self",
".",
"as_int_array",
"(",
")",
"]"
] | Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False. | [
"Map",
"a",
"function",
"from",
"str",
"-",
">",
"bool",
"element",
"-",
"wise",
"over",
"self",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L621-L645 |
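A sketch; note that missing values come back False no matter what f would return:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['aa', 'b', None], dtype=object),
                 missing_value=None)
mask = arr.map_predicate(lambda s: len(s) > 1)
# mask == array([ True, False, False]); f never sees the missing value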
25,938 | quantopian/zipline | zipline/lib/labelarray.py | LabelArray.map | def map(self, f):
"""
Map a function from str -> str element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always map to ``self.missing_value``.
"""
# f() should only return None if None is our missing value.
if self.missing_value is None:
allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
else:
allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES
def f_to_use(x,
missing_value=self.missing_value,
otypes=allowed_outtypes):
# Don't call f on the missing value; those locations don't exist
# semantically. We return _sortable_sentinel rather than None
# because the np.unique call below sorts the categories array,
# which raises an error on Python 3 because None and str aren't
# comparable.
if x == missing_value:
return _sortable_sentinel
ret = f(x)
if not isinstance(ret, otypes):
raise TypeError(
"LabelArray.map expected function {f} to return a string"
" or None, but got {type} instead.\n"
"Value was {value}.".format(
f=f.__name__,
type=type(ret).__name__,
value=ret,
)
)
if ret == missing_value:
return _sortable_sentinel
return ret
new_categories_with_duplicates = (
np.vectorize(f_to_use, otypes=[object])(self.categories)
)
# If f() maps multiple inputs to the same output, then we can end up
# with the same code duplicated multiple times. Compress the categories
# by running them through np.unique, and then use the reverse lookup
# table to compress codes as well.
new_categories, bloated_inverse_index = np.unique(
new_categories_with_duplicates,
return_inverse=True
)
if new_categories[0] is _sortable_sentinel:
# f_to_use returns _sortable_sentinel for locations that should be
# missing values in our output. Since np.unique returns the uniques
# in sorted order, and since _sortable_sentinel sorts before any
# string, we only need to check the first array entry.
new_categories[0] = self.missing_value
# `reverse_index` will always be a 64 bit integer even if we can hold a
# smaller array.
reverse_index = bloated_inverse_index.astype(
smallest_uint_that_can_hold(len(new_categories))
)
new_codes = np.take(reverse_index, self.as_int_array())
return self.from_codes_and_metadata(
new_codes,
new_categories,
dict(zip(new_categories, range(len(new_categories)))),
missing_value=self.missing_value,
) | python | def map(self, f):
"""
Map a function from str -> str element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always map to ``self.missing_value``.
"""
# f() should only return None if None is our missing value.
if self.missing_value is None:
allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
else:
allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES
def f_to_use(x,
missing_value=self.missing_value,
otypes=allowed_outtypes):
# Don't call f on the missing value; those locations don't exist
# semantically. We return _sortable_sentinel rather than None
# because the np.unique call below sorts the categories array,
# which raises an error on Python 3 because None and str aren't
# comparable.
if x == missing_value:
return _sortable_sentinel
ret = f(x)
if not isinstance(ret, otypes):
raise TypeError(
"LabelArray.map expected function {f} to return a string"
" or None, but got {type} instead.\n"
"Value was {value}.".format(
f=f.__name__,
type=type(ret).__name__,
value=ret,
)
)
if ret == missing_value:
return _sortable_sentinel
return ret
new_categories_with_duplicates = (
np.vectorize(f_to_use, otypes=[object])(self.categories)
)
# If f() maps multiple inputs to the same output, then we can end up
# with the same code duplicated multiple times. Compress the categories
# by running them through np.unique, and then use the reverse lookup
# table to compress codes as well.
new_categories, bloated_inverse_index = np.unique(
new_categories_with_duplicates,
return_inverse=True
)
if new_categories[0] is _sortable_sentinel:
# f_to_use returns _sortable_sentinel for locations that should be
# missing values in our output. Since np.unique returns the uniques
# in sorted order, and since _sortable_sentinel sorts before any
# string, we only need to check the first array entry.
new_categories[0] = self.missing_value
# `reverse_index` will always be a 64 bit integer even if we can hold a
# smaller array.
reverse_index = bloated_inverse_index.astype(
smallest_uint_that_can_hold(len(new_categories))
)
new_codes = np.take(reverse_index, self.as_int_array())
return self.from_codes_and_metadata(
new_codes,
new_categories,
dict(zip(new_categories, range(len(new_categories)))),
missing_value=self.missing_value,
) | [
"def",
"map",
"(",
"self",
",",
"f",
")",
":",
"# f() should only return None if None is our missing value.",
"if",
"self",
".",
"missing_value",
"is",
"None",
":",
"allowed_outtypes",
"=",
"self",
".",
"SUPPORTED_SCALAR_TYPES",
"else",
":",
"allowed_outtypes",
"=",
"self",
".",
"SUPPORTED_NON_NONE_SCALAR_TYPES",
"def",
"f_to_use",
"(",
"x",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"otypes",
"=",
"allowed_outtypes",
")",
":",
"# Don't call f on the missing value; those locations don't exist",
"# semantically. We return _sortable_sentinel rather than None",
"# because the np.unique call below sorts the categories array,",
"# which raises an error on Python 3 because None and str aren't",
"# comparable.",
"if",
"x",
"==",
"missing_value",
":",
"return",
"_sortable_sentinel",
"ret",
"=",
"f",
"(",
"x",
")",
"if",
"not",
"isinstance",
"(",
"ret",
",",
"otypes",
")",
":",
"raise",
"TypeError",
"(",
"\"LabelArray.map expected function {f} to return a string\"",
"\" or None, but got {type} instead.\\n\"",
"\"Value was {value}.\"",
".",
"format",
"(",
"f",
"=",
"f",
".",
"__name__",
",",
"type",
"=",
"type",
"(",
"ret",
")",
".",
"__name__",
",",
"value",
"=",
"ret",
",",
")",
")",
"if",
"ret",
"==",
"missing_value",
":",
"return",
"_sortable_sentinel",
"return",
"ret",
"new_categories_with_duplicates",
"=",
"(",
"np",
".",
"vectorize",
"(",
"f_to_use",
",",
"otypes",
"=",
"[",
"object",
"]",
")",
"(",
"self",
".",
"categories",
")",
")",
"# If f() maps multiple inputs to the same output, then we can end up",
"# with the same code duplicated multiple times. Compress the categories",
"# by running them through np.unique, and then use the reverse lookup",
"# table to compress codes as well.",
"new_categories",
",",
"bloated_inverse_index",
"=",
"np",
".",
"unique",
"(",
"new_categories_with_duplicates",
",",
"return_inverse",
"=",
"True",
")",
"if",
"new_categories",
"[",
"0",
"]",
"is",
"_sortable_sentinel",
":",
"# f_to_use return _sortable_sentinel for locations that should be",
"# missing values in our output. Since np.unique returns the uniques",
"# in sorted order, and since _sortable_sentinel sorts before any",
"# string, we only need to check the first array entry.",
"new_categories",
"[",
"0",
"]",
"=",
"self",
".",
"missing_value",
"# `reverse_index` will always be a 64 bit integer even if we can hold a",
"# smaller array.",
"reverse_index",
"=",
"bloated_inverse_index",
".",
"astype",
"(",
"smallest_uint_that_can_hold",
"(",
"len",
"(",
"new_categories",
")",
")",
")",
"new_codes",
"=",
"np",
".",
"take",
"(",
"reverse_index",
",",
"self",
".",
"as_int_array",
"(",
")",
")",
"return",
"self",
".",
"from_codes_and_metadata",
"(",
"new_codes",
",",
"new_categories",
",",
"dict",
"(",
"zip",
"(",
"new_categories",
",",
"range",
"(",
"len",
"(",
"new_categories",
")",
")",
")",
")",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
")"
] | Map a function from str -> str element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always map to ``self.missing_value``. | [
"Map",
"a",
"function",
"from",
"str",
"-",
">",
"str",
"element",
"-",
"wise",
"over",
"self",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L647-L722 |
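A sketch of element-wise mapping; per the docstring, f runs once per unique non-missing label and missing entries stay missing:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['aa', 'bb', None, 'aa'], dtype=object),
                 missing_value=None)
upper = arr.map(str.upper)  # labels become 'AA' and 'BB'; None is untouched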
25,939 | quantopian/zipline | zipline/finance/execution.py | asymmetric_round_price | def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95):
"""
Asymmetric rounding function for adjusting prices to the specified number
of places in a way that "improves" the price. For limit prices, this means
preferring to round down on buys and preferring to round up on sells.
For stop prices, it means the reverse.
If prefer_round_down == True:
When .05 below to .95 above a specified decimal place, use it.
If prefer_round_down == False:
When .95 below to .05 above a specified decimal place, use it.
In math-speak:
If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
"""
precision = zp_math.number_of_decimal_places(tick_size)
multiplier = int(tick_size * (10 ** precision))
diff -= 0.5 # shift the difference down
diff *= (10 ** -precision) # adjust diff to precision of tick size
diff *= multiplier # adjust diff to value of tick_size
# Subtracting an epsilon from diff to enforce the open-ness of the upper
# bound on buys and the lower bound on sells. Using the actual system
# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.
epsilon = float_info.epsilon * 10
diff = diff - epsilon
# relies on rounding half away from zero, unlike numpy's bankers' rounding
rounded = tick_size * consistent_round(
(price - (diff if prefer_round_down else -diff)) / tick_size
)
if zp_math.tolerant_equals(rounded, 0.0):
return 0.0
return rounded | python | def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95):
"""
Asymmetric rounding function for adjusting prices to the specified number
of places in a way that "improves" the price. For limit prices, this means
preferring to round down on buys and preferring to round up on sells.
For stop prices, it means the reverse.
If prefer_round_down == True:
When .05 below to .95 above a specified decimal place, use it.
If prefer_round_down == False:
When .95 below to .05 above a specified decimal place, use it.
In math-speak:
If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
"""
precision = zp_math.number_of_decimal_places(tick_size)
multiplier = int(tick_size * (10 ** precision))
diff -= 0.5 # shift the difference down
diff *= (10 ** -precision) # adjust diff to precision of tick size
diff *= multiplier # adjust diff to value of tick_size
# Subtracting an epsilon from diff to enforce the open-ness of the upper
# bound on buys and the lower bound on sells. Using the actual system
# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.
epsilon = float_info.epsilon * 10
diff = diff - epsilon
# relies on rounding half away from zero, unlike numpy's bankers' rounding
rounded = tick_size * consistent_round(
(price - (diff if prefer_round_down else -diff)) / tick_size
)
if zp_math.tolerant_equals(rounded, 0.0):
return 0.0
return rounded | [
"def",
"asymmetric_round_price",
"(",
"price",
",",
"prefer_round_down",
",",
"tick_size",
",",
"diff",
"=",
"0.95",
")",
":",
"precision",
"=",
"zp_math",
".",
"number_of_decimal_places",
"(",
"tick_size",
")",
"multiplier",
"=",
"int",
"(",
"tick_size",
"*",
"(",
"10",
"**",
"precision",
")",
")",
"diff",
"-=",
"0.5",
"# shift the difference down",
"diff",
"*=",
"(",
"10",
"**",
"-",
"precision",
")",
"# adjust diff to precision of tick size",
"diff",
"*=",
"multiplier",
"# adjust diff to value of tick_size",
"# Subtracting an epsilon from diff to enforce the open-ness of the upper",
"# bound on buys and the lower bound on sells. Using the actual system",
"# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.",
"epsilon",
"=",
"float_info",
".",
"epsilon",
"*",
"10",
"diff",
"=",
"diff",
"-",
"epsilon",
"# relies on rounding half away from zero, unlike numpy's bankers' rounding",
"rounded",
"=",
"tick_size",
"*",
"consistent_round",
"(",
"(",
"price",
"-",
"(",
"diff",
"if",
"prefer_round_down",
"else",
"-",
"diff",
")",
")",
"/",
"tick_size",
")",
"if",
"zp_math",
".",
"tolerant_equals",
"(",
"rounded",
",",
"0.0",
")",
":",
"return",
"0.0",
"return",
"rounded"
] | Asymmetric rounding function for adjusting prices to the specified number
of places in a way that "improves" the price. For limit prices, this means
preferring to round down on buys and preferring to round up on sells.
For stop prices, it means the reverse.
If prefer_round_down == True:
When .05 below to .95 above a specified decimal place, use it.
If prefer_round_down == False:
When .95 below to .05 above a specified decimal place, use it.
In math-speak:
If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01. | [
"Asymmetric",
"rounding",
"function",
"for",
"adjusting",
"prices",
"to",
"the",
"specified",
"number",
"of",
"places",
"in",
"a",
"way",
"that",
"improves",
"the",
"price",
".",
"For",
"limit",
"prices",
"this",
"means",
"preferring",
"to",
"round",
"down",
"on",
"buys",
"and",
"preferring",
"to",
"round",
"up",
"on",
"sells",
".",
"For",
"stop",
"prices",
"it",
"means",
"the",
"reverse",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/execution.py#L159-L193 |
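Two illustrative calls matching the half-open intervals in the docstring (boundary values sit on the epsilon adjustment above, so treat exact edges with care):

from zipline.finance.execution import asymmetric_round_price

# Buy-side limit (prefer_round_down=True): [<X-1>.0095, X.0195) -> X.01
asymmetric_round_price(3.0095, prefer_round_down=True, tick_size=0.01)
# -> 3.01

# Sell-side limit (prefer_round_down=False): (<X-1>.0005, X.0105] -> X.01
asymmetric_round_price(3.0105, prefer_round_down=False, tick_size=0.01)
# -> 3.01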
25,940 | quantopian/zipline | zipline/data/bundles/csvdir.py | csvdir_bundle | def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
"""
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs']) | python | def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
"""
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs']) | [
"def",
"csvdir_bundle",
"(",
"environ",
",",
"asset_db_writer",
",",
"minute_bar_writer",
",",
"daily_bar_writer",
",",
"adjustment_writer",
",",
"calendar",
",",
"start_session",
",",
"end_session",
",",
"cache",
",",
"show_progress",
",",
"output_dir",
",",
"tframes",
"=",
"None",
",",
"csvdir",
"=",
"None",
")",
":",
"if",
"not",
"csvdir",
":",
"csvdir",
"=",
"environ",
".",
"get",
"(",
"'CSVDIR'",
")",
"if",
"not",
"csvdir",
":",
"raise",
"ValueError",
"(",
"\"CSVDIR environment variable is not set\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"csvdir",
")",
":",
"raise",
"ValueError",
"(",
"\"%s is not a directory\"",
"%",
"csvdir",
")",
"if",
"not",
"tframes",
":",
"tframes",
"=",
"set",
"(",
"[",
"\"daily\"",
",",
"\"minute\"",
"]",
")",
".",
"intersection",
"(",
"os",
".",
"listdir",
"(",
"csvdir",
")",
")",
"if",
"not",
"tframes",
":",
"raise",
"ValueError",
"(",
"\"'daily' and 'minute' directories \"",
"\"not found in '%s'\"",
"%",
"csvdir",
")",
"divs_splits",
"=",
"{",
"'divs'",
":",
"DataFrame",
"(",
"columns",
"=",
"[",
"'sid'",
",",
"'amount'",
",",
"'ex_date'",
",",
"'record_date'",
",",
"'declared_date'",
",",
"'pay_date'",
"]",
")",
",",
"'splits'",
":",
"DataFrame",
"(",
"columns",
"=",
"[",
"'sid'",
",",
"'ratio'",
",",
"'effective_date'",
"]",
")",
"}",
"for",
"tframe",
"in",
"tframes",
":",
"ddir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"csvdir",
",",
"tframe",
")",
"symbols",
"=",
"sorted",
"(",
"item",
".",
"split",
"(",
"'.csv'",
")",
"[",
"0",
"]",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"ddir",
")",
"if",
"'.csv'",
"in",
"item",
")",
"if",
"not",
"symbols",
":",
"raise",
"ValueError",
"(",
"\"no <symbol>.csv* files found in %s\"",
"%",
"ddir",
")",
"dtype",
"=",
"[",
"(",
"'start_date'",
",",
"'datetime64[ns]'",
")",
",",
"(",
"'end_date'",
",",
"'datetime64[ns]'",
")",
",",
"(",
"'auto_close_date'",
",",
"'datetime64[ns]'",
")",
",",
"(",
"'symbol'",
",",
"'object'",
")",
"]",
"metadata",
"=",
"DataFrame",
"(",
"empty",
"(",
"len",
"(",
"symbols",
")",
",",
"dtype",
"=",
"dtype",
")",
")",
"if",
"tframe",
"==",
"'minute'",
":",
"writer",
"=",
"minute_bar_writer",
"else",
":",
"writer",
"=",
"daily_bar_writer",
"writer",
".",
"write",
"(",
"_pricing_iter",
"(",
"ddir",
",",
"symbols",
",",
"metadata",
",",
"divs_splits",
",",
"show_progress",
")",
",",
"show_progress",
"=",
"show_progress",
")",
"# Hardcode the exchange to \"CSVDIR\" for all assets and (elsewhere)",
"# register \"CSVDIR\" to resolve to the NYSE calendar, because these",
"# are all equities and thus can use the NYSE calendar.",
"metadata",
"[",
"'exchange'",
"]",
"=",
"\"CSVDIR\"",
"asset_db_writer",
".",
"write",
"(",
"equities",
"=",
"metadata",
")",
"divs_splits",
"[",
"'divs'",
"]",
"[",
"'sid'",
"]",
"=",
"divs_splits",
"[",
"'divs'",
"]",
"[",
"'sid'",
"]",
".",
"astype",
"(",
"int",
")",
"divs_splits",
"[",
"'splits'",
"]",
"[",
"'sid'",
"]",
"=",
"divs_splits",
"[",
"'splits'",
"]",
"[",
"'sid'",
"]",
".",
"astype",
"(",
"int",
")",
"adjustment_writer",
".",
"write",
"(",
"splits",
"=",
"divs_splits",
"[",
"'splits'",
"]",
",",
"dividends",
"=",
"divs_splits",
"[",
"'divs'",
"]",
")"
] | Build a zipline data bundle from the directory with csv files. | [
"Build",
"a",
"zipline",
"data",
"bundle",
"from",
"the",
"directory",
"with",
"csv",
"files",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/csvdir.py#L98-L168 |
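In practice this ingest function is typically wired up through the csvdir_equities helper in the same module; a sketch of that registration pattern (bundle name and directory are placeholders):

# e.g. in ~/.zipline/extension.py
from zipline.data.bundles import register
from zipline.data.bundles.csvdir import csvdir_equities

register(
    'custom-csvdir-bundle',
    csvdir_equities(['daily'], '/path/to/your/csvs'),
    calendar_name='NYSE',  # CSVDIR assets use the NYSE calendar
)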
25,941 | quantopian/zipline | zipline/pipeline/api_utils.py | restrict_to_dtype | def restrict_to_dtype(dtype, message_template):
"""
A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for error messages that specifically target Term methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Examples
--------
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...)
"""
def processor(term_method, _, term_instance):
term_dtype = term_instance.dtype
if term_dtype != dtype:
raise TypeError(
message_template.format(
method_name=term_method.__name__,
expected_dtype=dtype.name,
received_dtype=term_dtype,
)
)
return term_instance
return preprocess(self=processor) | python | def restrict_to_dtype(dtype, message_template):
"""
A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for error messages that specifically target Term methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Examples
--------
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...)
"""
def processor(term_method, _, term_instance):
term_dtype = term_instance.dtype
if term_dtype != dtype:
raise TypeError(
message_template.format(
method_name=term_method.__name__,
expected_dtype=dtype.name,
received_dtype=term_dtype,
)
)
return term_instance
return preprocess(self=processor) | [
"def",
"restrict_to_dtype",
"(",
"dtype",
",",
"message_template",
")",
":",
"def",
"processor",
"(",
"term_method",
",",
"_",
",",
"term_instance",
")",
":",
"term_dtype",
"=",
"term_instance",
".",
"dtype",
"if",
"term_dtype",
"!=",
"dtype",
":",
"raise",
"TypeError",
"(",
"message_template",
".",
"format",
"(",
"method_name",
"=",
"term_method",
".",
"__name__",
",",
"expected_dtype",
"=",
"dtype",
".",
"name",
",",
"received_dtype",
"=",
"term_dtype",
",",
")",
")",
"return",
"term_instance",
"return",
"preprocess",
"(",
"self",
"=",
"processor",
")"
] | A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for providing error messages that are specifically targeting Term methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Examples
--------
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...) | [
"A",
"factory",
"for",
"decorators",
"that",
"restrict",
"Term",
"methods",
"to",
"only",
"be",
"callable",
"on",
"Terms",
"with",
"a",
"specific",
"dtype",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/api_utils.py#L7-L49 |
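A toy decorated method on a stand-in object exposing a dtype attribute (FakeTerm is hypothetical; real usage targets pipeline Terms):

import numpy as np
from zipline.pipeline.api_utils import restrict_to_dtype

class FakeTerm(object):
    def __init__(self, dtype):
        self.dtype = dtype

    @restrict_to_dtype(
        dtype=np.dtype('float64'),
        message_template="{method_name}() needs {expected_dtype}, "
                         "got {received_dtype}.",
    )
    def zscore(self):
        return 'ok'

FakeTerm(np.dtype('float64')).zscore()  # 'ok'
try:
    FakeTerm(np.dtype('int64')).zscore()
except TypeError as err:
    print(err)  # "zscore() needs float64, got int64."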
25,942 | quantopian/zipline | zipline/sources/benchmark_source.py | BenchmarkSource.daily_returns | def daily_returns(self, start, end=None):
"""Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day.
"""
if end is None:
return self._daily_returns[start]
return self._daily_returns[start:end] | python | def daily_returns(self, start, end=None):
"""Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day.
"""
if end is None:
return self._daily_returns[start]
return self._daily_returns[start:end] | [
"def",
"daily_returns",
"(",
"self",
",",
"start",
",",
"end",
"=",
"None",
")",
":",
"if",
"end",
"is",
"None",
":",
"return",
"self",
".",
"_daily_returns",
"[",
"start",
"]",
"return",
"self",
".",
"_daily_returns",
"[",
"start",
":",
"end",
"]"
] | Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day. | [
"Returns",
"the",
"daily",
"returns",
"for",
"the",
"given",
"period",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/sources/benchmark_source.py#L124-L145 |
25,943 | quantopian/zipline | zipline/sources/benchmark_source.py | BenchmarkSource._initialize_precalculated_series | def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle case where stock data starts on first
# day, in this case use the open to close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) | python | def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle case where stock data starts on first
# day, in this case use the open to close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) | [
"def",
"_initialize_precalculated_series",
"(",
"self",
",",
"asset",
",",
"trading_calendar",
",",
"trading_days",
",",
"data_portal",
")",
":",
"if",
"self",
".",
"emission_rate",
"==",
"\"minute\"",
":",
"minutes",
"=",
"trading_calendar",
".",
"minutes_for_sessions_in_range",
"(",
"self",
".",
"sessions",
"[",
"0",
"]",
",",
"self",
".",
"sessions",
"[",
"-",
"1",
"]",
")",
"benchmark_series",
"=",
"data_portal",
".",
"get_history_window",
"(",
"[",
"asset",
"]",
",",
"minutes",
"[",
"-",
"1",
"]",
",",
"bar_count",
"=",
"len",
"(",
"minutes",
")",
"+",
"1",
",",
"frequency",
"=",
"\"1m\"",
",",
"field",
"=",
"\"price\"",
",",
"data_frequency",
"=",
"self",
".",
"emission_rate",
",",
"ffill",
"=",
"True",
")",
"[",
"asset",
"]",
"return",
"(",
"benchmark_series",
".",
"pct_change",
"(",
")",
"[",
"1",
":",
"]",
",",
"self",
".",
"downsample_minute_return_series",
"(",
"trading_calendar",
",",
"benchmark_series",
",",
")",
",",
")",
"start_date",
"=",
"asset",
".",
"start_date",
"if",
"start_date",
"<",
"trading_days",
"[",
"0",
"]",
":",
"# get the window of close prices for benchmark_asset from the",
"# last trading day of the simulation, going up to one day",
"# before the simulation start day (so that we can get the %",
"# change on day 1)",
"benchmark_series",
"=",
"data_portal",
".",
"get_history_window",
"(",
"[",
"asset",
"]",
",",
"trading_days",
"[",
"-",
"1",
"]",
",",
"bar_count",
"=",
"len",
"(",
"trading_days",
")",
"+",
"1",
",",
"frequency",
"=",
"\"1d\"",
",",
"field",
"=",
"\"price\"",
",",
"data_frequency",
"=",
"self",
".",
"emission_rate",
",",
"ffill",
"=",
"True",
")",
"[",
"asset",
"]",
"returns",
"=",
"benchmark_series",
".",
"pct_change",
"(",
")",
"[",
"1",
":",
"]",
"return",
"returns",
",",
"returns",
"elif",
"start_date",
"==",
"trading_days",
"[",
"0",
"]",
":",
"# Attempt to handle case where stock data starts on first",
"# day, in this case use the open to close return.",
"benchmark_series",
"=",
"data_portal",
".",
"get_history_window",
"(",
"[",
"asset",
"]",
",",
"trading_days",
"[",
"-",
"1",
"]",
",",
"bar_count",
"=",
"len",
"(",
"trading_days",
")",
",",
"frequency",
"=",
"\"1d\"",
",",
"field",
"=",
"\"price\"",
",",
"data_frequency",
"=",
"self",
".",
"emission_rate",
",",
"ffill",
"=",
"True",
")",
"[",
"asset",
"]",
"# get a minute history window of the first day",
"first_open",
"=",
"data_portal",
".",
"get_spot_value",
"(",
"asset",
",",
"'open'",
",",
"trading_days",
"[",
"0",
"]",
",",
"'daily'",
",",
")",
"first_close",
"=",
"data_portal",
".",
"get_spot_value",
"(",
"asset",
",",
"'close'",
",",
"trading_days",
"[",
"0",
"]",
",",
"'daily'",
",",
")",
"first_day_return",
"=",
"(",
"first_close",
"-",
"first_open",
")",
"/",
"first_open",
"returns",
"=",
"benchmark_series",
".",
"pct_change",
"(",
")",
"[",
":",
"]",
"returns",
"[",
"0",
"]",
"=",
"first_day_return",
"return",
"returns",
",",
"returns",
"else",
":",
"raise",
"ValueError",
"(",
"'cannot set benchmark to asset that does not exist during'",
"' the simulation period (asset start date=%r)'",
"%",
"start_date",
")"
] | Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DatetimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute | [
"Internal",
"method",
"that",
"pre",
"-",
"calculates",
"the",
"benchmark",
"return",
"series",
"for",
"use",
"in",
"the",
"simulation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/sources/benchmark_source.py#L196-L312 |
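The return math in the row above is plain pandas; the following is a minimal
sketch of the close-to-close logic with the first-day open-to-close
substitution. The inputs (``closes`` as a date-indexed price series plus the
first session's open and close) are assumptions for illustration, not zipline
objects.

import pandas as pd

def benchmark_returns(closes, first_open, first_close):
    # Close-to-close percentage change; pct_change leaves NaN in slot 0.
    returns = closes.pct_change()
    # No prior close exists when the asset lists on the first session,
    # so substitute that day's open-to-close return.
    returns.iloc[0] = (first_close - first_open) / first_open
    return returns

closes = pd.Series(
    [10.0, 10.5, 10.4],
    index=pd.to_datetime(['2016-01-04', '2016-01-05', '2016-01-06']),
)
print(benchmark_returns(closes, first_open=9.8, first_close=10.0))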
25,944 | quantopian/zipline | zipline/utils/run_algo.py | load_extensions | def load_extensions(default, extensions, strict, environ, reload=False):
"""Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
Load the default extension (~/.zipline/extension.py)?
extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
Should a failure to load an extension raise an error? If this is false,
it will still warn.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
"""
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
# load all of the zipline extensions
if ext.endswith('.py'):
with open(ext) as f:
ns = {}
six.exec_(compile(f.read(), ext, 'exec'), ns, ns)
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
warnings.warn(
'Failed to load extension: %r\n%s' % (ext, e),
stacklevel=2
)
else:
_loaded_extensions.add(ext) | python | def load_extensions(default, extensions, strict, environ, reload=False):
"""Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
Load the default extension (~/.zipline/extension.py)?
extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
Should a failure to load an extension raise an error? If this is false,
it will still warn.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
"""
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
# load all of the zipline extensions
if ext.endswith('.py'):
with open(ext) as f:
ns = {}
six.exec_(compile(f.read(), ext, 'exec'), ns, ns)
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
warnings.warn(
'Failed to load extension: %r\n%s' % (ext, e),
stacklevel=2
)
else:
_loaded_extensions.add(ext) | [
"def",
"load_extensions",
"(",
"default",
",",
"extensions",
",",
"strict",
",",
"environ",
",",
"reload",
"=",
"False",
")",
":",
"if",
"default",
":",
"default_extension_path",
"=",
"pth",
".",
"default_extension",
"(",
"environ",
"=",
"environ",
")",
"pth",
".",
"ensure_file",
"(",
"default_extension_path",
")",
"# put the default extension first so other extensions can depend on",
"# the order they are loaded",
"extensions",
"=",
"concatv",
"(",
"[",
"default_extension_path",
"]",
",",
"extensions",
")",
"for",
"ext",
"in",
"extensions",
":",
"if",
"ext",
"in",
"_loaded_extensions",
"and",
"not",
"reload",
":",
"continue",
"try",
":",
"# load all of the zipline extensionss",
"if",
"ext",
".",
"endswith",
"(",
"'.py'",
")",
":",
"with",
"open",
"(",
"ext",
")",
"as",
"f",
":",
"ns",
"=",
"{",
"}",
"six",
".",
"exec_",
"(",
"compile",
"(",
"f",
".",
"read",
"(",
")",
",",
"ext",
",",
"'exec'",
")",
",",
"ns",
",",
"ns",
")",
"else",
":",
"__import__",
"(",
"ext",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"strict",
":",
"# if `strict` we should raise the actual exception and fail",
"raise",
"# without `strict` we should just log the failure",
"warnings",
".",
"warn",
"(",
"'Failed to load extension: %r\\n%s'",
"%",
"(",
"ext",
",",
"e",
")",
",",
"stacklevel",
"=",
"2",
")",
"else",
":",
"_loaded_extensions",
".",
"add",
"(",
"ext",
")"
] | Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
Load the default extension (~/.zipline/extension.py)?
extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
Should a failure to load an extension raise an error? If this is false,
it will still warn.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded. | [
"Load",
"all",
"of",
"the",
"given",
"extensions",
".",
"This",
"should",
"be",
"called",
"by",
"run_algo",
"or",
"the",
"cli",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/run_algo.py#L220-L268 |
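The two load paths above (executing a ``.py`` path as a script versus
importing a dotted module name) can be reproduced in a few lines. This sketch
swaps the ``six``/``__import__`` pair for ``importlib``, and the helper name
``_load_one`` is illustrative rather than part of zipline.

import importlib

def _load_one(ext):
    if ext.endswith('.py'):
        # Execute the file as a script in a throwaway namespace.
        ns = {}
        with open(ext) as f:
            exec(compile(f.read(), ext, 'exec'), ns, ns)
    else:
        # Treat the string as a dotted module path and import it.
        importlib.import_module(ext)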
25,945 | quantopian/zipline | zipline/utils/run_algo.py | run_algorithm | def run_algorithm(start,
end,
initialize,
capital_base,
handle_data=None,
before_trading_start=None,
analyze=None,
data_frequency='daily',
bundle='quantopian-quandl',
bundle_timestamp=None,
trading_calendar=None,
metrics_set='default',
benchmark_returns=None,
default_extension=True,
extensions=(),
strict_extensions=True,
environ=os.environ,
blotter='default'):
"""
Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
bundle_timestamp : datetime, optional
The datetime to look up the bundle data for. This defaults to the
current time.
trading_calendar : TradingCalendar, optional
The trading calendar to use for your backtest.
metrics_set : iterable[Metric] or str, optional
The set of metrics to compute in the simulation. If a string is passed,
resolve the set with :func:`zipline.finance.metrics.load`.
default_extension : bool, optional
Should the default zipline extension be loaded? This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load? If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
blotter : str or zipline.finance.blotter.Blotter, optional
Blotter to use with this algorithm. If passed as a string, we look for
a blotter construction function registered with
``zipline.extensions.register`` and call it with no parameters.
Default is a :class:`zipline.finance.blotter.SimulationBlotter` that
never cancels orders.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles.
"""
load_extensions(default_extension, extensions, strict_extensions, environ)
return _run(
handle_data=handle_data,
initialize=initialize,
before_trading_start=before_trading_start,
analyze=analyze,
algofile=None,
algotext=None,
defines=(),
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=os.devnull,
trading_calendar=trading_calendar,
print_algo=False,
metrics_set=metrics_set,
local_namespace=False,
environ=environ,
blotter=blotter,
benchmark_returns=benchmark_returns,
) | python | def run_algorithm(start,
end,
initialize,
capital_base,
handle_data=None,
before_trading_start=None,
analyze=None,
data_frequency='daily',
bundle='quantopian-quandl',
bundle_timestamp=None,
trading_calendar=None,
metrics_set='default',
benchmark_returns=None,
default_extension=True,
extensions=(),
strict_extensions=True,
environ=os.environ,
blotter='default'):
"""
Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
bundle_timestamp : datetime, optional
The datetime to look up the bundle data for. This defaults to the
current time.
trading_calendar : TradingCalendar, optional
The trading calendar to use for your backtest.
metrics_set : iterable[Metric] or str, optional
The set of metrics to compute in the simulation. If a string is passed,
resolve the set with :func:`zipline.finance.metrics.load`.
default_extension : bool, optional
Should the default zipline extension be loaded? This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load? If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
blotter : str or zipline.finance.blotter.Blotter, optional
Blotter to use with this algorithm. If passed as a string, we look for
a blotter construction function registered with
``zipline.extensions.register`` and call it with no parameters.
Default is a :class:`zipline.finance.blotter.SimulationBlotter` that
never cancels orders.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles.
"""
load_extensions(default_extension, extensions, strict_extensions, environ)
return _run(
handle_data=handle_data,
initialize=initialize,
before_trading_start=before_trading_start,
analyze=analyze,
algofile=None,
algotext=None,
defines=(),
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=os.devnull,
trading_calendar=trading_calendar,
print_algo=False,
metrics_set=metrics_set,
local_namespace=False,
environ=environ,
blotter=blotter,
benchmark_returns=benchmark_returns,
) | [
"def",
"run_algorithm",
"(",
"start",
",",
"end",
",",
"initialize",
",",
"capital_base",
",",
"handle_data",
"=",
"None",
",",
"before_trading_start",
"=",
"None",
",",
"analyze",
"=",
"None",
",",
"data_frequency",
"=",
"'daily'",
",",
"bundle",
"=",
"'quantopian-quandl'",
",",
"bundle_timestamp",
"=",
"None",
",",
"trading_calendar",
"=",
"None",
",",
"metrics_set",
"=",
"'default'",
",",
"benchmark_returns",
"=",
"None",
",",
"default_extension",
"=",
"True",
",",
"extensions",
"=",
"(",
")",
",",
"strict_extensions",
"=",
"True",
",",
"environ",
"=",
"os",
".",
"environ",
",",
"blotter",
"=",
"'default'",
")",
":",
"load_extensions",
"(",
"default_extension",
",",
"extensions",
",",
"strict_extensions",
",",
"environ",
")",
"return",
"_run",
"(",
"handle_data",
"=",
"handle_data",
",",
"initialize",
"=",
"initialize",
",",
"before_trading_start",
"=",
"before_trading_start",
",",
"analyze",
"=",
"analyze",
",",
"algofile",
"=",
"None",
",",
"algotext",
"=",
"None",
",",
"defines",
"=",
"(",
")",
",",
"data_frequency",
"=",
"data_frequency",
",",
"capital_base",
"=",
"capital_base",
",",
"bundle",
"=",
"bundle",
",",
"bundle_timestamp",
"=",
"bundle_timestamp",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"output",
"=",
"os",
".",
"devnull",
",",
"trading_calendar",
"=",
"trading_calendar",
",",
"print_algo",
"=",
"False",
",",
"metrics_set",
"=",
"metrics_set",
",",
"local_namespace",
"=",
"False",
",",
"environ",
"=",
"environ",
",",
"blotter",
"=",
"blotter",
",",
"benchmark_returns",
"=",
"benchmark_returns",
",",
")"
] | Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
bundle_timestamp : datetime, optional
The datetime to look up the bundle data for. This defaults to the
current time.
trading_calendar : TradingCalendar, optional
The trading calendar to use for your backtest.
metrics_set : iterable[Metric] or str, optional
The set of metrics to compute in the simulation. If a string is passed,
resolve the set with :func:`zipline.finance.metrics.load`.
default_extension : bool, optional
Should the default zipline extension be loaded? This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load? If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
blotter : str or zipline.finance.blotter.Blotter, optional
Blotter to use with this algorithm. If passed as a string, we look for
a blotter construction function registered with
``zipline.extensions.register`` and call it with no parameters.
Default is a :class:`zipline.finance.blotter.SimulationBlotter` that
never cancels orders.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles. | [
"Run",
"a",
"trading",
"algorithm",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/run_algo.py#L271-L381 |
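A minimal end-to-end invocation of the API documented above could look like
this; it assumes the ``quantopian-quandl`` bundle has already been ingested
and that ``AAPL`` exists in it, and the strategy itself is a placeholder.

import pandas as pd
from zipline import run_algorithm
from zipline.api import order_target_percent, symbol

def initialize(context):
    context.asset = symbol('AAPL')

def handle_data(context, data):
    # Rebalance to a 100% position in the asset on every bar.
    order_target_percent(context.asset, 1.0)

perf = run_algorithm(
    start=pd.Timestamp('2016-01-04', tz='utc'),
    end=pd.Timestamp('2016-12-30', tz='utc'),
    initialize=initialize,
    handle_data=handle_data,
    capital_base=100000,
    data_frequency='daily',
)
print(perf[['portfolio_value']].tail())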
25,946 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.handle_extra_source | def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
# any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df | python | def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
# any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df | [
"def",
"handle_extra_source",
"(",
"self",
",",
"source_df",
",",
"sim_params",
")",
":",
"if",
"source_df",
"is",
"None",
":",
"return",
"# Normalize all the dates in the df",
"source_df",
".",
"index",
"=",
"source_df",
".",
"index",
".",
"normalize",
"(",
")",
"# source_df's sid column can either consist of assets we know about",
"# (such as sid(24)) or of assets we don't know about (such as",
"# palladium).",
"#",
"# In both cases, we break up the dataframe into individual dfs",
"# that only contain a single asset's information. ie, if source_df",
"# has data for PALLADIUM and GOLD, we split source_df into two",
"# dataframes, one for each. (same applies if source_df has data for",
"# AAPL and IBM).",
"#",
"# We then take each child df and reindex it to the simulation's date",
"# range by forward-filling missing values. this makes reads simpler.",
"#",
"# Finally, we store the data. For each column, we store a mapping in",
"# self.augmented_sources_map from the column to a dictionary of",
"# asset -> df. In other words,",
"# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df",
"# holding that data.",
"source_date_index",
"=",
"self",
".",
"trading_calendar",
".",
"sessions_in_range",
"(",
"sim_params",
".",
"start_session",
",",
"sim_params",
".",
"end_session",
")",
"# Break the source_df up into one dataframe per sid. This lets",
"# us (more easily) calculate accurate start/end dates for each sid,",
"# de-dup data, and expand the data to fit the backtest start/end date.",
"grouped_by_sid",
"=",
"source_df",
".",
"groupby",
"(",
"[",
"\"sid\"",
"]",
")",
"group_names",
"=",
"grouped_by_sid",
".",
"groups",
".",
"keys",
"(",
")",
"group_dict",
"=",
"{",
"}",
"for",
"group_name",
"in",
"group_names",
":",
"group_dict",
"[",
"group_name",
"]",
"=",
"grouped_by_sid",
".",
"get_group",
"(",
"group_name",
")",
"# This will be the dataframe which we query to get fetcher assets at",
"# any given time. Get's overwritten every time there's a new fetcher",
"# call",
"extra_source_df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"identifier",
",",
"df",
"in",
"iteritems",
"(",
"group_dict",
")",
":",
"# Since we know this df only contains a single sid, we can safely",
"# de-dupe by the index (dt). If minute granularity, will take the",
"# last data point on any given day",
"df",
"=",
"df",
".",
"groupby",
"(",
"level",
"=",
"0",
")",
".",
"last",
"(",
")",
"# Reindex the dataframe based on the backtest start/end date.",
"# This makes reads easier during the backtest.",
"df",
"=",
"self",
".",
"_reindex_extra_source",
"(",
"df",
",",
"source_date_index",
")",
"for",
"col_name",
"in",
"df",
".",
"columns",
".",
"difference",
"(",
"[",
"'sid'",
"]",
")",
":",
"if",
"col_name",
"not",
"in",
"self",
".",
"_augmented_sources_map",
":",
"self",
".",
"_augmented_sources_map",
"[",
"col_name",
"]",
"=",
"{",
"}",
"self",
".",
"_augmented_sources_map",
"[",
"col_name",
"]",
"[",
"identifier",
"]",
"=",
"df",
"# Append to extra_source_df the reindexed dataframe for the single",
"# sid",
"extra_source_df",
"=",
"extra_source_df",
".",
"append",
"(",
"df",
")",
"self",
".",
"_extra_source_df",
"=",
"extra_source_df"
] | Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation. | [
"Extra",
"sources",
"always",
"have",
"a",
"sid",
"column",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L324-L394 |
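The split/de-dupe/reindex steps above are ordinary pandas operations; this
standalone sketch mirrors them with a made-up two-sid frame and session range.

import pandas as pd

source_df = pd.DataFrame(
    {'sid': ['GOLD', 'GOLD', 'PALLADIUM'], 'value': [1.0, 2.0, 3.0]},
    index=pd.to_datetime(['2016-01-04', '2016-01-04', '2016-01-05']),
)
sessions = pd.date_range('2016-01-04', '2016-01-08', freq='B')

for sid, df in source_df.groupby('sid'):
    df = df.groupby(level=0).last()            # de-dupe: keep the last row per day
    df = df.reindex(sessions, method='ffill')  # expand to the full backtest range
    print(sid, df['value'].tolist())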
25,947 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_last_traded_dt | def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is the provided dt.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt) | python | def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is the provided dt.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt) | [
"def",
"get_last_traded_dt",
"(",
"self",
",",
"asset",
",",
"dt",
",",
"data_frequency",
")",
":",
"return",
"self",
".",
"_get_pricing_reader",
"(",
"data_frequency",
")",
".",
"get_last_traded_dt",
"(",
"asset",
",",
"dt",
")"
] | Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is the provided dt. | [
"Given",
"an",
"asset",
"and",
"dt",
"returns",
"the",
"last",
"traded",
"dt",
"from",
"the",
"viewpoint",
"of",
"the",
"given",
"dt",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L399-L407 |
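The contract is easy to illustrate against a price series in which NaN marks
an untraded bar. This is a standalone analogue for intuition, not the reader
implementation that zipline actually dispatches to.

import numpy as np
import pandas as pd

prices = pd.Series(
    [10.0, np.nan, 10.2],
    index=pd.to_datetime(['2016-01-04', '2016-01-05', '2016-01-06']),
)

def last_traded_dt(prices, dt):
    # Keep bars at or before dt, drop untraded (NaN) bars, and return the
    # most recent remaining timestamp (dt itself if dt saw a trade).
    traded = prices.loc[:dt].dropna()
    return traded.index[-1] if len(traded) else pd.NaT

print(last_traded_dt(prices, pd.Timestamp('2016-01-05')))  # 2016-01-04
print(last_traded_dt(prices, pd.Timestamp('2016-01-06')))  # 2016-01-06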
25,948 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_adjustments | def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustment_ratios : list[float]
The cumulative adjustment ratio for each asset, composed from any
splits, mergers, and dividends falling between dt and perspective_dt.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset | python | def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustment_ratios : list[float]
The cumulative adjustment ratio for each asset, composed from any
splits, mergers, and dividends falling between dt and perspective_dt.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset | [
"def",
"get_adjustments",
"(",
"self",
",",
"assets",
",",
"field",
",",
"dt",
",",
"perspective_dt",
")",
":",
"if",
"isinstance",
"(",
"assets",
",",
"Asset",
")",
":",
"assets",
"=",
"[",
"assets",
"]",
"adjustment_ratios_per_asset",
"=",
"[",
"]",
"def",
"split_adj_factor",
"(",
"x",
")",
":",
"return",
"x",
"if",
"field",
"!=",
"'volume'",
"else",
"1.0",
"/",
"x",
"for",
"asset",
"in",
"assets",
":",
"adjustments_for_asset",
"=",
"[",
"]",
"split_adjustments",
"=",
"self",
".",
"_get_adjustment_list",
"(",
"asset",
",",
"self",
".",
"_splits_dict",
",",
"\"SPLITS\"",
")",
"for",
"adj_dt",
",",
"adj",
"in",
"split_adjustments",
":",
"if",
"dt",
"<",
"adj_dt",
"<=",
"perspective_dt",
":",
"adjustments_for_asset",
".",
"append",
"(",
"split_adj_factor",
"(",
"adj",
")",
")",
"elif",
"adj_dt",
">",
"perspective_dt",
":",
"break",
"if",
"field",
"!=",
"'volume'",
":",
"merger_adjustments",
"=",
"self",
".",
"_get_adjustment_list",
"(",
"asset",
",",
"self",
".",
"_mergers_dict",
",",
"\"MERGERS\"",
")",
"for",
"adj_dt",
",",
"adj",
"in",
"merger_adjustments",
":",
"if",
"dt",
"<",
"adj_dt",
"<=",
"perspective_dt",
":",
"adjustments_for_asset",
".",
"append",
"(",
"adj",
")",
"elif",
"adj_dt",
">",
"perspective_dt",
":",
"break",
"dividend_adjustments",
"=",
"self",
".",
"_get_adjustment_list",
"(",
"asset",
",",
"self",
".",
"_dividends_dict",
",",
"\"DIVIDENDS\"",
",",
")",
"for",
"adj_dt",
",",
"adj",
"in",
"dividend_adjustments",
":",
"if",
"dt",
"<",
"adj_dt",
"<=",
"perspective_dt",
":",
"adjustments_for_asset",
".",
"append",
"(",
"adj",
")",
"elif",
"adj_dt",
">",
"perspective_dt",
":",
"break",
"ratio",
"=",
"reduce",
"(",
"mul",
",",
"adjustments_for_asset",
",",
"1.0",
")",
"adjustment_ratios_per_asset",
".",
"append",
"(",
"ratio",
")",
"return",
"adjustment_ratios_per_asset"
] | Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustment_ratios : list[float]
The cumulative adjustment ratio for each asset, composed from any
splits, mergers, and dividends falling between dt and perspective_dt. | [
"Returns",
"a",
"list",
"of",
"adjustments",
"between",
"the",
"dt",
"and",
"perspective_dt",
"for",
"the",
"given",
"field",
"and",
"list",
"of",
"assets"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L575-L638 |
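The interval test ``dt < adj_dt <= perspective_dt`` and the closing
``reduce(mul, ..., 1.0)`` compose the per-event ratios into one multiplier.
Here is a standalone sketch with invented split events (a 2-for-1 followed by
a 4-for-1); the event list is an assumption, not data from any bundle.

from functools import reduce
from operator import mul

import pandas as pd

splits = [(pd.Timestamp('2016-03-01'), 0.5),
          (pd.Timestamp('2016-09-01'), 0.25)]

def adjustment_ratio(events, dt, perspective_dt, field='price'):
    # Volume scales inversely to price across a split.
    ratios = [adj if field != 'volume' else 1.0 / adj
              for adj_dt, adj in events
              if dt < adj_dt <= perspective_dt]
    return reduce(mul, ratios, 1.0)

dt, pdt = pd.Timestamp('2016-01-04'), pd.Timestamp('2016-12-30')
print(adjustment_ratio(splits, dt, pdt))            # 0.125
print(adjustment_ratio(splits, dt, pdt, 'volume'))  # 8.0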
25,949 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_adjusted_value | def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this is a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value | python | def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this is a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value | [
"def",
"get_adjusted_value",
"(",
"self",
",",
"asset",
",",
"field",
",",
"dt",
",",
"perspective_dt",
",",
"data_frequency",
",",
"spot_value",
"=",
"None",
")",
":",
"if",
"spot_value",
"is",
"None",
":",
"# if this a fetcher field, we want to use perspective_dt (not dt)",
"# because we want the new value as of midnight (fetcher only works",
"# on a daily basis, all timestamps are on midnight)",
"if",
"self",
".",
"_is_extra_source",
"(",
"asset",
",",
"field",
",",
"self",
".",
"_augmented_sources_map",
")",
":",
"spot_value",
"=",
"self",
".",
"get_spot_value",
"(",
"asset",
",",
"field",
",",
"perspective_dt",
",",
"data_frequency",
")",
"else",
":",
"spot_value",
"=",
"self",
".",
"get_spot_value",
"(",
"asset",
",",
"field",
",",
"dt",
",",
"data_frequency",
")",
"if",
"isinstance",
"(",
"asset",
",",
"Equity",
")",
":",
"ratio",
"=",
"self",
".",
"get_adjustments",
"(",
"asset",
",",
"field",
",",
"dt",
",",
"perspective_dt",
")",
"[",
"0",
"]",
"spot_value",
"*=",
"ratio",
"return",
"spot_value"
] | Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp. | [
"Returns",
"a",
"scalar",
"value",
"representing",
"the",
"value",
"of",
"the",
"desired",
"asset",
"s",
"field",
"at",
"the",
"given",
"dt",
"with",
"adjustments",
"applied",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L640-L689 |
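With the cumulative ratio in hand, the adjusted lookup above reduces to a
single multiplication over the spot value. A compact sketch, where ``ratios``
stands in for the split/merger/dividend multipliers known by
``perspective_dt``:

def adjusted_value(spot_value, ratios, field='price'):
    # 'last_traded' values are timestamps and are never scaled; everything
    # else is multiplied by the cumulative adjustment ratio.
    if field == 'last_traded':
        return spot_value
    out = spot_value
    for ratio in ratios:
        out *= ratio
    return out

print(adjusted_value(100.0, [0.5, 0.98]))  # a split then a dividend -> 49.0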
25,950 | quantopian/zipline | zipline/data/data_portal.py | DataPortal._get_history_daily_window | def _get_history_daily_window(self,
assets,
end_dt,
bar_count,
field_to_use,
data_frequency):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
) | python | def _get_history_daily_window(self,
assets,
end_dt,
bar_count,
field_to_use,
data_frequency):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
) | [
"def",
"_get_history_daily_window",
"(",
"self",
",",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"field_to_use",
",",
"data_frequency",
")",
":",
"session",
"=",
"self",
".",
"trading_calendar",
".",
"minute_to_session_label",
"(",
"end_dt",
")",
"days_for_window",
"=",
"self",
".",
"_get_days_for_window",
"(",
"session",
",",
"bar_count",
")",
"if",
"len",
"(",
"assets",
")",
"==",
"0",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"None",
",",
"index",
"=",
"days_for_window",
",",
"columns",
"=",
"None",
")",
"data",
"=",
"self",
".",
"_get_history_daily_window_data",
"(",
"assets",
",",
"days_for_window",
",",
"end_dt",
",",
"field_to_use",
",",
"data_frequency",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"index",
"=",
"days_for_window",
",",
"columns",
"=",
"assets",
")"
] | Internal method that returns a dataframe containing history bars
of daily frequency for the given sids. | [
"Internal",
"method",
"that",
"returns",
"a",
"dataframe",
"containing",
"history",
"bars",
"of",
"daily",
"frequency",
"for",
"the",
"given",
"sids",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L787-L812 |
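One detail worth seeing in isolation: with an empty asset list the method
still returns a frame indexed by the window's sessions, just with zero
columns. A minimal reproduction:

import pandas as pd

days = pd.date_range('2016-01-04', periods=3, freq='B')
empty = pd.DataFrame(None, index=days, columns=None)
print(empty.shape)  # (3, 0)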
25,951 | quantopian/zipline | zipline/data/data_portal.py | DataPortal._get_history_minute_window | def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_minute_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_minute_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_data(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
) | python | def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_minute_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_minute_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_data(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
) | [
"def",
"_get_history_minute_window",
"(",
"self",
",",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"field_to_use",
")",
":",
"# get all the minutes for this window",
"try",
":",
"minutes_for_window",
"=",
"self",
".",
"trading_calendar",
".",
"minutes_window",
"(",
"end_dt",
",",
"-",
"bar_count",
")",
"except",
"KeyError",
":",
"self",
".",
"_handle_minute_history_out_of_bounds",
"(",
"bar_count",
")",
"if",
"minutes_for_window",
"[",
"0",
"]",
"<",
"self",
".",
"_first_trading_minute",
":",
"self",
".",
"_handle_minute_history_out_of_bounds",
"(",
"bar_count",
")",
"asset_minute_data",
"=",
"self",
".",
"_get_minute_window_data",
"(",
"assets",
",",
"field_to_use",
",",
"minutes_for_window",
",",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"asset_minute_data",
",",
"index",
"=",
"minutes_for_window",
",",
"columns",
"=",
"assets",
")"
] | Internal method that returns a dataframe containing history bars
of minute frequency for the given sids. | [
"Internal",
"method",
"that",
"returns",
"a",
"dataframe",
"containing",
"history",
"bars",
"of",
"minute",
"frequency",
"for",
"the",
"given",
"sids",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L886-L913 |
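The negative-count windowing and the first-minute bounds check above can be
imitated against a plain DatetimeIndex; ``all_minutes`` and this
``minutes_window`` helper are stand-ins, not the trading-calendar API.

import pandas as pd

all_minutes = pd.date_range('2016-01-04 14:31', periods=390,
                            freq='T', tz='UTC')

def minutes_window(end_dt, bar_count):
    # Take the bar_count minutes ending at end_dt (inclusive); fail if the
    # window would reach before the first available trading minute.
    end_loc = all_minutes.get_loc(end_dt)
    start_loc = end_loc - bar_count + 1
    if start_loc < 0:
        raise ValueError('history window extends before the first minute')
    return all_minutes[start_loc:end_loc + 1]

print(len(minutes_window(all_minutes[10], 5)))  # 5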
25,952 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_history_window | def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
end_dt: pd.Timestamp
The last bar (inclusive) of the requested window.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS and field != 'sid':
raise ValueError("Invalid field: {0}".format(field))
if bar_count < 1:
raise ValueError(
"bar_count must be >= 1, but got {}".format(bar_count)
)
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close", data_frequency)
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field, data_frequency)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
ffill_data_frequency = 'minute'
elif frequency == "1d":
ffill_data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]
history_start, history_end = df.index[[0, -1]]
if ffill_data_frequency == 'daily' and data_frequency == 'minute':
# When we're looking for a daily value, but we haven't seen any
# volume in today's minute bars yet, we need to use the
# previous day's ffilled daily price. Using today's daily price
# could yield a value from later today.
history_start -= self.trading_calendar.day
initial_values = []
for asset in df.columns[assets_with_leading_nan]:
last_traded = self.get_last_traded_dt(
asset,
history_start,
ffill_data_frequency,
)
if isnull(last_traded):
initial_values.append(nan)
else:
initial_values.append(
self.get_adjusted_value(
asset,
field,
dt=last_traded,
perspective_dt=history_end,
data_frequency=ffill_data_frequency,
)
)
# Set leading values for assets that were missing data, then ffill.
df.ix[0, assets_with_leading_nan] = np.array(
initial_values,
dtype=np.float64
)
df.fillna(method='ffill', inplace=True)
# forward-filling will incorrectly produce values after the end of
# an asset's lifetime, so write NaNs back over the asset's
# end_date.
normed_index = df.index.normalize()
for asset in df.columns:
if history_end >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
df.loc[normed_index > asset.end_date, asset] = nan
return df | python | def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
end_dt: pd.Timestamp
The last bar (inclusive) of the requested window.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS and field != 'sid':
raise ValueError("Invalid field: {0}".format(field))
if bar_count < 1:
raise ValueError(
"bar_count must be >= 1, but got {}".format(bar_count)
)
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close", data_frequency)
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field, data_frequency)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
ffill_data_frequency = 'minute'
elif frequency == "1d":
ffill_data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]
history_start, history_end = df.index[[0, -1]]
if ffill_data_frequency == 'daily' and data_frequency == 'minute':
# When we're looking for a daily value, but we haven't seen any
# volume in today's minute bars yet, we need to use the
# previous day's ffilled daily price. Using today's daily price
# could yield a value from later today.
history_start -= self.trading_calendar.day
initial_values = []
for asset in df.columns[assets_with_leading_nan]:
last_traded = self.get_last_traded_dt(
asset,
history_start,
ffill_data_frequency,
)
if isnull(last_traded):
initial_values.append(nan)
else:
initial_values.append(
self.get_adjusted_value(
asset,
field,
dt=last_traded,
perspective_dt=history_end,
data_frequency=ffill_data_frequency,
)
)
# Set leading values for assets that were missing data, then ffill.
df.ix[0, assets_with_leading_nan] = np.array(
initial_values,
dtype=np.float64
)
df.fillna(method='ffill', inplace=True)
# forward-filling will incorrectly produce values after the end of
# an asset's lifetime, so write NaNs back over the asset's
# end_date.
normed_index = df.index.normalize()
for asset in df.columns:
if history_end >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
df.loc[normed_index > asset.end_date, asset] = nan
return df | [
"def",
"get_history_window",
"(",
"self",
",",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"frequency",
",",
"field",
",",
"data_frequency",
",",
"ffill",
"=",
"True",
")",
":",
"if",
"field",
"not",
"in",
"OHLCVP_FIELDS",
"and",
"field",
"!=",
"'sid'",
":",
"raise",
"ValueError",
"(",
"\"Invalid field: {0}\"",
".",
"format",
"(",
"field",
")",
")",
"if",
"bar_count",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"bar_count must be >= 1, but got {}\"",
".",
"format",
"(",
"bar_count",
")",
")",
"if",
"frequency",
"==",
"\"1d\"",
":",
"if",
"field",
"==",
"\"price\"",
":",
"df",
"=",
"self",
".",
"_get_history_daily_window",
"(",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"\"close\"",
",",
"data_frequency",
")",
"else",
":",
"df",
"=",
"self",
".",
"_get_history_daily_window",
"(",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"field",
",",
"data_frequency",
")",
"elif",
"frequency",
"==",
"\"1m\"",
":",
"if",
"field",
"==",
"\"price\"",
":",
"df",
"=",
"self",
".",
"_get_history_minute_window",
"(",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"\"close\"",
")",
"else",
":",
"df",
"=",
"self",
".",
"_get_history_minute_window",
"(",
"assets",
",",
"end_dt",
",",
"bar_count",
",",
"field",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid frequency: {0}\"",
".",
"format",
"(",
"frequency",
")",
")",
"# forward-fill price",
"if",
"field",
"==",
"\"price\"",
":",
"if",
"frequency",
"==",
"\"1m\"",
":",
"ffill_data_frequency",
"=",
"'minute'",
"elif",
"frequency",
"==",
"\"1d\"",
":",
"ffill_data_frequency",
"=",
"'daily'",
"else",
":",
"raise",
"Exception",
"(",
"\"Only 1d and 1m are supported for forward-filling.\"",
")",
"assets_with_leading_nan",
"=",
"np",
".",
"where",
"(",
"isnull",
"(",
"df",
".",
"iloc",
"[",
"0",
"]",
")",
")",
"[",
"0",
"]",
"history_start",
",",
"history_end",
"=",
"df",
".",
"index",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"if",
"ffill_data_frequency",
"==",
"'daily'",
"and",
"data_frequency",
"==",
"'minute'",
":",
"# When we're looking for a daily value, but we haven't seen any",
"# volume in today's minute bars yet, we need to use the",
"# previous day's ffilled daily price. Using today's daily price",
"# could yield a value from later today.",
"history_start",
"-=",
"self",
".",
"trading_calendar",
".",
"day",
"initial_values",
"=",
"[",
"]",
"for",
"asset",
"in",
"df",
".",
"columns",
"[",
"assets_with_leading_nan",
"]",
":",
"last_traded",
"=",
"self",
".",
"get_last_traded_dt",
"(",
"asset",
",",
"history_start",
",",
"ffill_data_frequency",
",",
")",
"if",
"isnull",
"(",
"last_traded",
")",
":",
"initial_values",
".",
"append",
"(",
"nan",
")",
"else",
":",
"initial_values",
".",
"append",
"(",
"self",
".",
"get_adjusted_value",
"(",
"asset",
",",
"field",
",",
"dt",
"=",
"last_traded",
",",
"perspective_dt",
"=",
"history_end",
",",
"data_frequency",
"=",
"ffill_data_frequency",
",",
")",
")",
"# Set leading values for assets that were missing data, then ffill.",
"df",
".",
"ix",
"[",
"0",
",",
"assets_with_leading_nan",
"]",
"=",
"np",
".",
"array",
"(",
"initial_values",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"df",
".",
"fillna",
"(",
"method",
"=",
"'ffill'",
",",
"inplace",
"=",
"True",
")",
"# forward-filling will incorrectly produce values after the end of",
"# an asset's lifetime, so write NaNs back over the asset's",
"# end_date.",
"normed_index",
"=",
"df",
".",
"index",
".",
"normalize",
"(",
")",
"for",
"asset",
"in",
"df",
".",
"columns",
":",
"if",
"history_end",
">=",
"asset",
".",
"end_date",
":",
"# if the window extends past the asset's end date, set",
"# all post-end-date values to NaN in that asset's series",
"df",
".",
"loc",
"[",
"normed_index",
">",
"asset",
".",
"end_date",
",",
"asset",
"]",
"=",
"nan",
"return",
"df"
] | Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
end_dt: pd.Timestamp
The datetime of the last bar in the desired window.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data. | [
"Public",
"API",
"method",
"that",
"returns",
"a",
"dataframe",
"containing",
"the",
"requested",
"history",
"window",
".",
"Data",
"is",
"fully",
"adjusted",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L915-L1034 |
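The forward-fill branch above is easier to follow on a toy frame. Below is a minimal, self-contained sketch of the same three steps — seed leading NaNs with last-traded values, forward-fill, then NaN out bars past an asset's end date. All column names, dates, and prices here are made up for illustration.

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2016-01-04", periods=5, tz="UTC")
df = pd.DataFrame(
    {"A": [np.nan, 10.0, np.nan, 11.0, 12.0],
     "B": [np.nan, np.nan, 5.0, np.nan, np.nan]},
    index=idx,
)

# Columns whose first row is missing (mirrors assets_with_leading_nan).
leading_nan = np.where(pd.isnull(df.iloc[0]))[0]
# Stand-ins for the last-traded prices fetched per missing asset.
df.iloc[0, leading_nan] = np.array([9.5, 4.8], dtype=np.float64)
df.fillna(method="ffill", inplace=True)

# Re-mask values after a hypothetical end date for column "B".
end_date = idx[2]
df.loc[df.index.normalize() > end_date, "B"] = np.nan
print(df)
```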
25,953 | quantopian/zipline | zipline/data/data_portal.py | DataPortal._get_minute_window_data | def _get_minute_window_data(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._minute_history_loader.history(assets,
minutes_for_window,
field,
False) | python | def _get_minute_window_data(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._minute_history_loader.history(assets,
minutes_for_window,
field,
False) | [
"def",
"_get_minute_window_data",
"(",
"self",
",",
"assets",
",",
"field",
",",
"minutes_for_window",
")",
":",
"return",
"self",
".",
"_minute_history_loader",
".",
"history",
"(",
"assets",
",",
"minutes_for_window",
",",
"field",
",",
"False",
")"
] | Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values. | [
"Internal",
"method",
"that",
"gets",
"a",
"window",
"of",
"adjusted",
"minute",
"data",
"for",
"an",
"asset",
"and",
"specified",
"date",
"range",
".",
"Used",
"to",
"support",
"the",
"history",
"API",
"method",
"for",
"minute",
"bars",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1036-L1063 |
25,954 | quantopian/zipline | zipline/data/data_portal.py | DataPortal._get_daily_window_data | def _get_daily_window_data(self,
assets,
field,
days_in_window,
extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
days_in_window: pd.DatetimeIndex
The list of days representing the desired window.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
dtype = float64 if field != 'sid' else int64
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
return_array = np.zeros((bar_count, len(assets)), dtype=dtype)
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(assets,
days_in_window,
field,
extra_slot)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array | python | def _get_daily_window_data(self,
assets,
field,
days_in_window,
extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
days_in_window: pd.DatetimeIndex
The list of days representing the desired window.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
dtype = float64 if field != 'sid' else int64
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
return_array = np.zeros((bar_count, len(assets)), dtype=dtype)
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(assets,
days_in_window,
field,
extra_slot)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array | [
"def",
"_get_daily_window_data",
"(",
"self",
",",
"assets",
",",
"field",
",",
"days_in_window",
",",
"extra_slot",
"=",
"True",
")",
":",
"bar_count",
"=",
"len",
"(",
"days_in_window",
")",
"# create an np.array of size bar_count",
"dtype",
"=",
"float64",
"if",
"field",
"!=",
"'sid'",
"else",
"int64",
"if",
"extra_slot",
":",
"return_array",
"=",
"np",
".",
"zeros",
"(",
"(",
"bar_count",
"+",
"1",
",",
"len",
"(",
"assets",
")",
")",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"return_array",
"=",
"np",
".",
"zeros",
"(",
"(",
"bar_count",
",",
"len",
"(",
"assets",
")",
")",
",",
"dtype",
"=",
"dtype",
")",
"if",
"field",
"!=",
"\"volume\"",
":",
"# volumes default to 0, so we don't need to put NaNs in the array",
"return_array",
"[",
":",
"]",
"=",
"np",
".",
"NAN",
"if",
"bar_count",
"!=",
"0",
":",
"data",
"=",
"self",
".",
"_history_loader",
".",
"history",
"(",
"assets",
",",
"days_in_window",
",",
"field",
",",
"extra_slot",
")",
"if",
"extra_slot",
":",
"return_array",
"[",
":",
"len",
"(",
"return_array",
")",
"-",
"1",
",",
":",
"]",
"=",
"data",
"else",
":",
"return_array",
"[",
":",
"len",
"(",
"data",
")",
"]",
"=",
"data",
"return",
"return_array"
] | Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
days_in_window: pd.DatetimeIndex
The list of days representing the desired window.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan. | [
"Internal",
"method",
"that",
"gets",
"a",
"window",
"of",
"adjusted",
"daily",
"data",
"for",
"a",
"sid",
"and",
"specified",
"date",
"range",
".",
"Used",
"to",
"support",
"the",
"history",
"API",
"method",
"for",
"daily",
"bars",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1065-L1122 |
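The preallocation trick in `_get_daily_window_data` — NaN-fill everything except volume and reserve a trailing slot for the current partial day — can be reproduced standalone; the shapes below are arbitrary.

```python
import numpy as np

bar_count, n_assets = 3, 2
out = np.zeros((bar_count + 1, n_assets), dtype=np.float64)
out[:] = np.nan               # skipped when field == "volume"
data = np.arange(6, dtype=np.float64).reshape(bar_count, n_assets)
out[:len(out) - 1, :] = data  # last row stays NaN for the partial day
print(out)
```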
25,955 | quantopian/zipline | zipline/data/data_portal.py | DataPortal._get_adjustment_list | def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments | python | def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments | [
"def",
"_get_adjustment_list",
"(",
"self",
",",
"asset",
",",
"adjustments_dict",
",",
"table_name",
")",
":",
"if",
"self",
".",
"_adjustment_reader",
"is",
"None",
":",
"return",
"[",
"]",
"sid",
"=",
"int",
"(",
"asset",
")",
"try",
":",
"adjustments",
"=",
"adjustments_dict",
"[",
"sid",
"]",
"except",
"KeyError",
":",
"adjustments",
"=",
"adjustments_dict",
"[",
"sid",
"]",
"=",
"self",
".",
"_adjustment_reader",
".",
"get_adjustments_for_sid",
"(",
"table_name",
",",
"sid",
")",
"return",
"adjustments"
] | Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first | [
"Internal",
"method",
"that",
"returns",
"a",
"list",
"of",
"adjustments",
"for",
"the",
"given",
"sid",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1124-L1156 |
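`_get_adjustment_list` uses the try/except-KeyError read-through cache idiom. Here is a self-contained sketch of that pattern with a stub loader standing in for the adjustment reader; all names below are illustrative.

```python
def load_adjustments(table_name, sid):
    print("loading sid %d from %s" % (sid, table_name))
    return [[0.5, "2016-01-07"]]

def get_adjustment_list(sid, adjustments_dict, table_name):
    try:
        adjustments = adjustments_dict[sid]
    except KeyError:
        adjustments = adjustments_dict[sid] = load_adjustments(table_name, sid)
    return adjustments

cache = {}
get_adjustment_list(1, cache, "SPLITS")  # misses; calls the loader
get_adjustment_list(1, cache, "SPLITS")  # served from the cache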
25,956 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_splits | def get_splits(self, assets, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
List of splits, where each split is a (asset, ratio) tuple.
"""
if self._adjustment_reader is None or not assets:
return []
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in assets]
splits = [(self.asset_finder.retrieve_asset(split[0]), split[1])
for split in splits]
return splits | python | def get_splits(self, assets, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
List of splits, where each split is a (asset, ratio) tuple.
"""
if self._adjustment_reader is None or not assets:
return []
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in assets]
splits = [(self.asset_finder.retrieve_asset(split[0]), split[1])
for split in splits]
return splits | [
"def",
"get_splits",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"if",
"self",
".",
"_adjustment_reader",
"is",
"None",
"or",
"not",
"assets",
":",
"return",
"[",
"]",
"# convert dt to # of seconds since epoch, because that's what we use",
"# in the adjustments db",
"seconds",
"=",
"int",
"(",
"dt",
".",
"value",
"/",
"1e9",
")",
"splits",
"=",
"self",
".",
"_adjustment_reader",
".",
"conn",
".",
"execute",
"(",
"\"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?\"",
",",
"(",
"seconds",
",",
")",
")",
".",
"fetchall",
"(",
")",
"splits",
"=",
"[",
"split",
"for",
"split",
"in",
"splits",
"if",
"split",
"[",
"0",
"]",
"in",
"assets",
"]",
"splits",
"=",
"[",
"(",
"self",
".",
"asset_finder",
".",
"retrieve_asset",
"(",
"split",
"[",
"0",
"]",
")",
",",
"split",
"[",
"1",
"]",
")",
"for",
"split",
"in",
"splits",
"]",
"return",
"splits"
] | Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
List of splits, where each split is a (asset, ratio) tuple. | [
"Returns",
"any",
"splits",
"for",
"the",
"given",
"sids",
"and",
"the",
"given",
"dt",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1158-L1190 |
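The `int(dt.value / 1e9)` conversion in `get_splits` turns a `pd.Timestamp` (stored as nanoseconds since the epoch) into the whole seconds used by the adjustments db; a quick round-trip check:

```python
import pandas as pd

dt = pd.Timestamp("2016-01-05", tz="UTC")
seconds = int(dt.value / 1e9)  # Timestamp.value is in nanoseconds
assert pd.Timestamp(seconds, unit="s", tz="UTC") == dt
print(seconds)  # 1451952000
```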
25,957 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_stock_dividends | def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info | python | def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info | [
"def",
"get_stock_dividends",
"(",
"self",
",",
"sid",
",",
"trading_days",
")",
":",
"if",
"self",
".",
"_adjustment_reader",
"is",
"None",
":",
"return",
"[",
"]",
"if",
"len",
"(",
"trading_days",
")",
"==",
"0",
":",
"return",
"[",
"]",
"start_dt",
"=",
"trading_days",
"[",
"0",
"]",
".",
"value",
"/",
"1e9",
"end_dt",
"=",
"trading_days",
"[",
"-",
"1",
"]",
".",
"value",
"/",
"1e9",
"dividends",
"=",
"self",
".",
"_adjustment_reader",
".",
"conn",
".",
"execute",
"(",
"\"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND \"",
"\"ex_date > ? AND pay_date < ?\"",
",",
"(",
"int",
"(",
"sid",
")",
",",
"start_dt",
",",
"end_dt",
",",
")",
")",
".",
"fetchall",
"(",
")",
"dividend_info",
"=",
"[",
"]",
"for",
"dividend_tuple",
"in",
"dividends",
":",
"dividend_info",
".",
"append",
"(",
"{",
"\"declared_date\"",
":",
"dividend_tuple",
"[",
"1",
"]",
",",
"\"ex_date\"",
":",
"pd",
".",
"Timestamp",
"(",
"dividend_tuple",
"[",
"2",
"]",
",",
"unit",
"=",
"\"s\"",
")",
",",
"\"pay_date\"",
":",
"pd",
".",
"Timestamp",
"(",
"dividend_tuple",
"[",
"3",
"]",
",",
"unit",
"=",
"\"s\"",
")",
",",
"\"payment_sid\"",
":",
"dividend_tuple",
"[",
"4",
"]",
",",
"\"ratio\"",
":",
"dividend_tuple",
"[",
"5",
"]",
",",
"\"record_date\"",
":",
"pd",
".",
"Timestamp",
"(",
"dividend_tuple",
"[",
"6",
"]",
",",
"unit",
"=",
"\"s\"",
")",
",",
"\"sid\"",
":",
"dividend_tuple",
"[",
"7",
"]",
"}",
")",
"return",
"dividend_info"
] | Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps. | [
"Returns",
"all",
"the",
"stock",
"dividends",
"for",
"a",
"specific",
"sid",
"that",
"occur",
"in",
"the",
"given",
"trading",
"range",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1192-L1237 |
25,958 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_fetcher_assets | def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else [] | python | def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else [] | [
"def",
"get_fetcher_assets",
"(",
"self",
",",
"dt",
")",
":",
"# return a list of assets for the current date, as defined by the",
"# fetcher source",
"if",
"self",
".",
"_extra_source_df",
"is",
"None",
":",
"return",
"[",
"]",
"day",
"=",
"normalize_date",
"(",
"dt",
")",
"if",
"day",
"in",
"self",
".",
"_extra_source_df",
".",
"index",
":",
"assets",
"=",
"self",
".",
"_extra_source_df",
".",
"loc",
"[",
"day",
"]",
"[",
"'sid'",
"]",
"else",
":",
"return",
"[",
"]",
"if",
"isinstance",
"(",
"assets",
",",
"pd",
".",
"Series",
")",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"assets",
"if",
"isinstance",
"(",
"x",
",",
"Asset",
")",
"]",
"else",
":",
"return",
"[",
"assets",
"]",
"if",
"isinstance",
"(",
"assets",
",",
"Asset",
")",
"else",
"[",
"]"
] | Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects. | [
"Returns",
"a",
"list",
"of",
"assets",
"for",
"the",
"current",
"date",
"as",
"defined",
"by",
"the",
"fetcher",
"data",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1244-L1268 |
25,959 | quantopian/zipline | zipline/data/data_portal.py | DataPortal.get_current_future_chain | def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
to the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain) | python | def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
to the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain) | [
"def",
"get_current_future_chain",
"(",
"self",
",",
"continuous_future",
",",
"dt",
")",
":",
"rf",
"=",
"self",
".",
"_roll_finders",
"[",
"continuous_future",
".",
"roll_style",
"]",
"session",
"=",
"self",
".",
"trading_calendar",
".",
"minute_to_session_label",
"(",
"dt",
")",
"contract_center",
"=",
"rf",
".",
"get_contract_center",
"(",
"continuous_future",
".",
"root_symbol",
",",
"session",
",",
"continuous_future",
".",
"offset",
")",
"oc",
"=",
"self",
".",
"asset_finder",
".",
"get_ordered_contracts",
"(",
"continuous_future",
".",
"root_symbol",
")",
"chain",
"=",
"oc",
".",
"active_chain",
"(",
"contract_center",
",",
"session",
".",
"value",
")",
"return",
"self",
".",
"asset_finder",
".",
"retrieve_all",
"(",
"chain",
")"
] | Retrieves the future chain for the contract at the given `dt` according
to the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on. | [
"Retrieves",
"the",
"future",
"chain",
"for",
"the",
"contract",
"at",
"the",
"given",
"dt",
"according",
"the",
"continuous_future",
"specification",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1391-L1412 |
25,960 | quantopian/zipline | zipline/utils/numpy_utils.py | coerce_to_dtype | def coerce_to_dtype(dtype, value):
"""
Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
"""
name = dtype.name
if name.startswith('datetime64'):
if name == 'datetime64[D]':
return make_datetime64D(value)
elif name == 'datetime64[ns]':
return make_datetime64ns(value)
else:
raise TypeError(
"Don't know how to coerce values of dtype %s" % dtype
)
return dtype.type(value) | python | def coerce_to_dtype(dtype, value):
"""
Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
"""
name = dtype.name
if name.startswith('datetime64'):
if name == 'datetime64[D]':
return make_datetime64D(value)
elif name == 'datetime64[ns]':
return make_datetime64ns(value)
else:
raise TypeError(
"Don't know how to coerce values of dtype %s" % dtype
)
return dtype.type(value) | [
"def",
"coerce_to_dtype",
"(",
"dtype",
",",
"value",
")",
":",
"name",
"=",
"dtype",
".",
"name",
"if",
"name",
".",
"startswith",
"(",
"'datetime64'",
")",
":",
"if",
"name",
"==",
"'datetime64[D]'",
":",
"return",
"make_datetime64D",
"(",
"value",
")",
"elif",
"name",
"==",
"'datetime64[ns]'",
":",
"return",
"make_datetime64ns",
"(",
"value",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Don't know how to coerce values of dtype %s\"",
"%",
"dtype",
")",
"return",
"dtype",
".",
"type",
"(",
"value",
")"
] | Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes. | [
"Make",
"a",
"value",
"with",
"the",
"specified",
"numpy",
"dtype",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L142-L158 |
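The fallback branch of `coerce_to_dtype` is just `dtype.type(value)`; datetimes are special-cased because `datetime64` needs an explicit unit. A standalone illustration using plain numpy (no zipline imports required):

```python
import numpy as np

assert np.dtype('float64').type("3.5") == 3.5   # generic dtype.type(value) path
assert np.dtype('bool').type(1) == True
# datetime64 values compare across units once the unit is pinned:
assert np.datetime64('2014-01-01', 'D') == np.datetime64('2014-01-01', 'ns')
```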
25,961 | quantopian/zipline | zipline/utils/numpy_utils.py | repeat_first_axis | def repeat_first_axis(array, count):
"""
Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, (count,) + array.shape, (0,) + array.strides) | python | def repeat_first_axis(array, count):
"""
Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, (count,) + array.shape, (0,) + array.strides) | [
"def",
"repeat_first_axis",
"(",
"array",
",",
"count",
")",
":",
"return",
"as_strided",
"(",
"array",
",",
"(",
"count",
",",
")",
"+",
"array",
".",
"shape",
",",
"(",
"0",
",",
")",
"+",
"array",
".",
"strides",
")"
] | Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis | [
"Restride",
"array",
"to",
"repeat",
"count",
"times",
"along",
"the",
"first",
"axis",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L173-L213 |
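`repeat_first_axis` allocates nothing: the zero stride on the new axis makes every "repeated" row alias the same memory, which is exactly what the Notes section warns about. A small demonstration of the aliasing (the stride values shown assume 64-bit integer data):

```python
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(3)
view = as_strided(a, (4,) + a.shape, (0,) + a.strides)
a[0] = 99           # mutate the base array...
print(view[3, 0])   # ...and every "copy" sees it: 99
print(view.strides) # (0, 8) for int64 data
```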
25,962 | quantopian/zipline | zipline/utils/numpy_utils.py | repeat_last_axis | def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_first_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,)) | python | def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_first_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,)) | [
"def",
"repeat_last_axis",
"(",
"array",
",",
"count",
")",
":",
"return",
"as_strided",
"(",
"array",
",",
"array",
".",
"shape",
"+",
"(",
"count",
",",
")",
",",
"array",
".",
"strides",
"+",
"(",
"0",
",",
")",
")"
] | Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
-----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_first_axis | [
"Restride",
"array",
"to",
"repeat",
"count",
"times",
"along",
"the",
"last",
"axis",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L216-L256 |
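A sanity check that the `repeat_last_axis` construction matches an explicit broadcast; the same `as_strided` call is inlined so the snippet runs without zipline:

```python
import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(3)
out = as_strided(a, a.shape + (4,), a.strides + (0,))
assert out.shape == (3, 4)
assert (out == a[:, np.newaxis]).all()  # each value repeated along the last axis
```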
25,963 | quantopian/zipline | zipline/utils/numpy_utils.py | isnat | def isnat(obj):
"""
Check if a value is np.NaT.
"""
if obj.dtype.kind not in ('m', 'M'):
raise ValueError("%s is not a numpy datetime or timedelta")
return obj.view(int64_dtype) == iNaT | python | def isnat(obj):
"""
Check if a value is np.NaT.
"""
if obj.dtype.kind not in ('m', 'M'):
raise ValueError("%s is not a numpy datetime or timedelta")
return obj.view(int64_dtype) == iNaT | [
"def",
"isnat",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"dtype",
".",
"kind",
"not",
"in",
"(",
"'m'",
",",
"'M'",
")",
":",
"raise",
"ValueError",
"(",
"\"%s is not a numpy datetime or timedelta\"",
")",
"return",
"obj",
".",
"view",
"(",
"int64_dtype",
")",
"==",
"iNaT"
] | Check if a value is np.NaT. | [
"Check",
"if",
"a",
"value",
"is",
"np",
".",
"NaT",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L334-L340 |
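The int64 view in `isnat` works because numpy stores NaT as the INT64_MIN sentinel (the same value zipline imports as `iNaT`). Newer numpy releases expose `np.isnat` directly; the sentinel check can be replayed like this:

```python
import numpy as np

arr = np.array(['2016-01-05', 'NaT'], dtype='datetime64[ns]')
iNaT = np.iinfo(np.int64).min      # NaT's underlying sentinel
print(arr.view(np.int64) == iNaT)  # [False  True]
```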
25,964 | quantopian/zipline | zipline/utils/numpy_utils.py | is_missing | def is_missing(data, missing_value):
"""
Generic is_missing function that handles NaN and NaT.
"""
if is_float(data) and isnan(missing_value):
return isnan(data)
elif is_datetime(data) and isnat(missing_value):
return isnat(data)
return (data == missing_value) | python | def is_missing(data, missing_value):
"""
Generic is_missing function that handles NaN and NaT.
"""
if is_float(data) and isnan(missing_value):
return isnan(data)
elif is_datetime(data) and isnat(missing_value):
return isnat(data)
return (data == missing_value) | [
"def",
"is_missing",
"(",
"data",
",",
"missing_value",
")",
":",
"if",
"is_float",
"(",
"data",
")",
"and",
"isnan",
"(",
"missing_value",
")",
":",
"return",
"isnan",
"(",
"data",
")",
"elif",
"is_datetime",
"(",
"data",
")",
"and",
"isnat",
"(",
"missing_value",
")",
":",
"return",
"isnat",
"(",
"data",
")",
"return",
"(",
"data",
"==",
"missing_value",
")"
] | Generic is_missing function that handles NaN and NaT. | [
"Generic",
"is_missing",
"function",
"that",
"handles",
"NaN",
"and",
"NaT",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L343-L351 |
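The reason `is_missing` special-cases floats and datetimes instead of always comparing with `==`: NaN (and NaT) never compare equal to themselves, so the plain equality branch would silently miss them.

```python
import numpy as np

print(np.nan == np.nan)                   # False -- equality can't find NaN
print(np.isnan(np.array([1.0, np.nan])))  # [False  True]
print(np.array([1, -1]) == -1)            # [False  True] -- fine for int sentinels
```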
25,965 | quantopian/zipline | zipline/utils/numpy_utils.py | busday_count_mask_NaT | def busday_count_mask_NaT(begindates, enddates, out=None):
"""
Simple wrapper around numpy.busday_count that returns `float` arrays rather than int
arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count
"""
if out is None:
out = empty(broadcast(begindates, enddates).shape, dtype=float)
beginmask = isnat(begindates)
endmask = isnat(enddates)
out = busday_count(
# Temporarily fill in non-NaT values.
where(beginmask, _notNaT, begindates),
where(endmask, _notNaT, enddates),
out=out,
)
# Fill in entries where either comparison was NaT with nan in the output.
out[beginmask | endmask] = nan
return out | python | def busday_count_mask_NaT(begindates, enddates, out=None):
"""
Simple wrapper around numpy.busday_count that returns `float` arrays rather than int
arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count
"""
if out is None:
out = empty(broadcast(begindates, enddates).shape, dtype=float)
beginmask = isnat(begindates)
endmask = isnat(enddates)
out = busday_count(
# Temporarily fill in non-NaT values.
where(beginmask, _notNaT, begindates),
where(endmask, _notNaT, enddates),
out=out,
)
# Fill in entries where either comparison was NaT with nan in the output.
out[beginmask | endmask] = nan
return out | [
"def",
"busday_count_mask_NaT",
"(",
"begindates",
",",
"enddates",
",",
"out",
"=",
"None",
")",
":",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"empty",
"(",
"broadcast",
"(",
"begindates",
",",
"enddates",
")",
".",
"shape",
",",
"dtype",
"=",
"float",
")",
"beginmask",
"=",
"isnat",
"(",
"begindates",
")",
"endmask",
"=",
"isnat",
"(",
"enddates",
")",
"out",
"=",
"busday_count",
"(",
"# Temporarily fill in non-NaT values.",
"where",
"(",
"beginmask",
",",
"_notNaT",
",",
"begindates",
")",
",",
"where",
"(",
"endmask",
",",
"_notNaT",
",",
"enddates",
")",
",",
"out",
"=",
"out",
",",
")",
"# Fill in entries where either comparison was NaT with nan in the output.",
"out",
"[",
"beginmask",
"|",
"endmask",
"]",
"=",
"nan",
"return",
"out"
] | Simple wrapper around numpy.busday_count that returns `float` arrays rather than int
arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count | [
"Simple",
"of",
"numpy",
".",
"busday_count",
"that",
"returns",
"float",
"arrays",
"rather",
"than",
"int",
"arrays",
"and",
"handles",
"NaT",
"s",
"by",
"returning",
"NaN",
"s",
"where",
"the",
"inputs",
"were",
"NaT",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L354-L381 |
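The fill-then-mask dance in `busday_count_mask_NaT` can be replayed with plain numpy (>= 1.13 for `np.isnat`); the placeholder date below stands in for zipline's `_notNaT`:

```python
import numpy as np

begin = np.array(['2014-01-01', 'NaT'], dtype='datetime64[D]')
end = np.array(['2014-01-08', '2014-01-08'], dtype='datetime64[D]')

mask = np.isnat(begin) | np.isnat(end)
filler = np.datetime64('2014-01-02')  # any valid date works; it gets masked out
out = np.busday_count(np.where(np.isnat(begin), filler, begin),
                      np.where(np.isnat(end), filler, end)).astype(float)
out[mask] = np.nan
print(out)  # [ 5. nan]
```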
25,966 | quantopian/zipline | zipline/utils/numpy_utils.py | changed_locations | def changed_locations(a, include_first):
"""
Compute indices of values in ``a`` that differ from the previous value.
Parameters
----------
a : np.ndarray
The array in which to find indices of change.
include_first : bool
Whether or not to consider the first index of the array as "changed".
Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4])
"""
if a.ndim > 1:
raise ValueError("indices_of_changed_values only supports 1D arrays.")
indices = flatnonzero(diff(a)) + 1
if not include_first:
return indices
return hstack([[0], indices]) | python | def changed_locations(a, include_first):
"""
Compute indices of values in ``a`` that differ from the previous value.
Parameters
----------
a : np.ndarray
The array on which to indices of change.
include_first : bool
Whether or not to consider the first index of the array as "changed".
Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4])
"""
if a.ndim > 1:
raise ValueError("indices_of_changed_values only supports 1D arrays.")
indices = flatnonzero(diff(a)) + 1
if not include_first:
return indices
return hstack([[0], indices]) | [
"def",
"changed_locations",
"(",
"a",
",",
"include_first",
")",
":",
"if",
"a",
".",
"ndim",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"indices_of_changed_values only supports 1D arrays.\"",
")",
"indices",
"=",
"flatnonzero",
"(",
"diff",
"(",
"a",
")",
")",
"+",
"1",
"if",
"not",
"include_first",
":",
"return",
"indices",
"return",
"hstack",
"(",
"[",
"[",
"0",
"]",
",",
"indices",
"]",
")"
] | Compute indices of values in ``a`` that differ from the previous value.
Parameters
----------
a : np.ndarray
The array in which to find indices of change.
include_first : bool
Whether or not to consider the first index of the array as "changed".
Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4]) | [
"Compute",
"indices",
"of",
"values",
"in",
"a",
"that",
"differ",
"from",
"the",
"previous",
"value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L469-L496 |
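The diff/flatnonzero idiom in `changed_locations` also pairs naturally with `np.split` to recover the runs themselves:

```python
import numpy as np

a = np.array([0, 0, 5, 5, 1, 1])
boundaries = np.flatnonzero(np.diff(a)) + 1
print(boundaries)                    # [2 4]
print(np.hstack([[0], boundaries]))  # [0 2 4], i.e. include_first=True
print(np.split(a, boundaries))       # [array([0, 0]), array([5, 5]), array([1, 1])]
```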
25,967 | quantopian/zipline | zipline/utils/date_utils.py | compute_date_range_chunks | def compute_date_range_chunks(sessions, start_date, end_date, chunksize):
"""Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for.
"""
if start_date not in sessions:
raise KeyError("Start date %s is not found in calendar." %
(start_date.strftime("%Y-%m-%d"),))
if end_date not in sessions:
raise KeyError("End date %s is not found in calendar." %
(end_date.strftime("%Y-%m-%d"),))
if end_date < start_date:
raise ValueError("End date %s cannot precede start date %s." %
(end_date.strftime("%Y-%m-%d"),
start_date.strftime("%Y-%m-%d")))
if chunksize is None:
return [(start_date, end_date)]
start_ix, end_ix = sessions.slice_locs(start_date, end_date)
return (
(r[0], r[-1]) for r in partition_all(
chunksize, sessions[start_ix:end_ix]
)
) | python | def compute_date_range_chunks(sessions, start_date, end_date, chunksize):
"""Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for.
"""
if start_date not in sessions:
raise KeyError("Start date %s is not found in calendar." %
(start_date.strftime("%Y-%m-%d"),))
if end_date not in sessions:
raise KeyError("End date %s is not found in calendar." %
(end_date.strftime("%Y-%m-%d"),))
if end_date < start_date:
raise ValueError("End date %s cannot precede start date %s." %
(end_date.strftime("%Y-%m-%d"),
start_date.strftime("%Y-%m-%d")))
if chunksize is None:
return [(start_date, end_date)]
start_ix, end_ix = sessions.slice_locs(start_date, end_date)
return (
(r[0], r[-1]) for r in partition_all(
chunksize, sessions[start_ix:end_ix]
)
) | [
"def",
"compute_date_range_chunks",
"(",
"sessions",
",",
"start_date",
",",
"end_date",
",",
"chunksize",
")",
":",
"if",
"start_date",
"not",
"in",
"sessions",
":",
"raise",
"KeyError",
"(",
"\"Start date %s is not found in calendar.\"",
"%",
"(",
"start_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
",",
")",
")",
"if",
"end_date",
"not",
"in",
"sessions",
":",
"raise",
"KeyError",
"(",
"\"End date %s is not found in calendar.\"",
"%",
"(",
"end_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
",",
")",
")",
"if",
"end_date",
"<",
"start_date",
":",
"raise",
"ValueError",
"(",
"\"End date %s cannot precede start date %s.\"",
"%",
"(",
"end_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
",",
"start_date",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
")",
")",
"if",
"chunksize",
"is",
"None",
":",
"return",
"[",
"(",
"start_date",
",",
"end_date",
")",
"]",
"start_ix",
",",
"end_ix",
"=",
"sessions",
".",
"slice_locs",
"(",
"start_date",
",",
"end_date",
")",
"return",
"(",
"(",
"r",
"[",
"0",
"]",
",",
"r",
"[",
"-",
"1",
"]",
")",
"for",
"r",
"in",
"partition_all",
"(",
"chunksize",
",",
"sessions",
"[",
"start_ix",
":",
"end_ix",
"]",
")",
")"
] | Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for. | [
"Compute",
"the",
"start",
"and",
"end",
"dates",
"to",
"run",
"a",
"pipeline",
"for",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/date_utils.py#L4-L42 |
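The chunking in `compute_date_range_chunks` reduces to `toolz.partition_all` over the session slice; a standalone sketch with a synthetic business-day calendar:

```python
import pandas as pd
from toolz import partition_all

sessions = pd.date_range('2016-01-04', '2016-01-15', freq='B')
for chunk in partition_all(4, sessions):
    print(chunk[0].date(), '->', chunk[-1].date())
# 2016-01-04 -> 2016-01-07
# 2016-01-08 -> 2016-01-13
# 2016-01-14 -> 2016-01-15
```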
25,968 | quantopian/zipline | zipline/pipeline/engine.py | SimplePipelineEngine.run_pipeline | def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline`
"""
# See notes at the top of this module for a description of the
# algorithm implemented here.
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
domain = self.resolve_domain(pipeline)
graph = pipeline.to_execution_plan(
domain, self._root_mask_term, start_date, end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(
domain, start_date, end_date, extra_rows,
)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(graph, dates, assets, initial_workspace)
return self._to_narrow(
graph.outputs,
results,
results.pop(graph.screen_name),
dates[extra_rows:],
assets,
) | python | def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline`
"""
# See notes at the top of this module for a description of the
# algorithm implemented here.
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
domain = self.resolve_domain(pipeline)
graph = pipeline.to_execution_plan(
domain, self._root_mask_term, start_date, end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(
domain, start_date, end_date, extra_rows,
)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(graph, dates, assets, initial_workspace)
return self._to_narrow(
graph.outputs,
results,
results.pop(graph.screen_name),
dates[extra_rows:],
assets,
) | [
"def",
"run_pipeline",
"(",
"self",
",",
"pipeline",
",",
"start_date",
",",
"end_date",
")",
":",
"# See notes at the top of this module for a description of the",
"# algorithm implemented here.",
"if",
"end_date",
"<",
"start_date",
":",
"raise",
"ValueError",
"(",
"\"start_date must be before or equal to end_date \\n\"",
"\"start_date=%s, end_date=%s\"",
"%",
"(",
"start_date",
",",
"end_date",
")",
")",
"domain",
"=",
"self",
".",
"resolve_domain",
"(",
"pipeline",
")",
"graph",
"=",
"pipeline",
".",
"to_execution_plan",
"(",
"domain",
",",
"self",
".",
"_root_mask_term",
",",
"start_date",
",",
"end_date",
",",
")",
"extra_rows",
"=",
"graph",
".",
"extra_rows",
"[",
"self",
".",
"_root_mask_term",
"]",
"root_mask",
"=",
"self",
".",
"_compute_root_mask",
"(",
"domain",
",",
"start_date",
",",
"end_date",
",",
"extra_rows",
",",
")",
"dates",
",",
"assets",
",",
"root_mask_values",
"=",
"explode",
"(",
"root_mask",
")",
"initial_workspace",
"=",
"self",
".",
"_populate_initial_workspace",
"(",
"{",
"self",
".",
"_root_mask_term",
":",
"root_mask_values",
",",
"self",
".",
"_root_mask_dates_term",
":",
"as_column",
"(",
"dates",
".",
"values",
")",
"}",
",",
"self",
".",
"_root_mask_term",
",",
"graph",
",",
"dates",
",",
"assets",
",",
")",
"results",
"=",
"self",
".",
"compute_chunk",
"(",
"graph",
",",
"dates",
",",
"assets",
",",
"initial_workspace",
")",
"return",
"self",
".",
"_to_narrow",
"(",
"graph",
".",
"outputs",
",",
"results",
",",
"results",
".",
"pop",
"(",
"graph",
".",
"screen_name",
")",
",",
"dates",
"[",
"extra_rows",
":",
"]",
",",
"assets",
",",
")"
] | Compute a pipeline.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline` | [
"Compute",
"a",
"pipeline",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L265-L336 |
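A hypothetical call pattern for `run_pipeline`; the engine construction (loader, asset finder, calendar) is elided, so the final call is shown commented out rather than presented as runnable:

```python
from zipline.pipeline import Pipeline
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.factors import SimpleMovingAverage

pipe = Pipeline(columns={
    'sma10': SimpleMovingAverage(inputs=[EquityPricing.close],
                                 window_length=10),
})
# result = engine.run_pipeline(pipe, start_date, end_date)
# `result` is a DataFrame indexed by (date, asset) with an 'sma10' column.
```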
25,969 | quantopian/zipline | zipline/pipeline/engine.py | SimplePipelineEngine.resolve_domain | def resolve_domain(self, pipeline):
"""Resolve a concrete domain for ``pipeline``.
"""
domain = pipeline.domain(default=self._default_domain)
if domain is GENERIC:
raise ValueError(
"Unable to determine domain for Pipeline.\n"
"Pass domain=<desired domain> to your Pipeline to set a "
"domain."
)
return domain | python | def resolve_domain(self, pipeline):
"""Resolve a concrete domain for ``pipeline``.
"""
domain = pipeline.domain(default=self._default_domain)
if domain is GENERIC:
raise ValueError(
"Unable to determine domain for Pipeline.\n"
"Pass domain=<desired domain> to your Pipeline to set a "
"domain."
)
return domain | [
"def",
"resolve_domain",
"(",
"self",
",",
"pipeline",
")",
":",
"domain",
"=",
"pipeline",
".",
"domain",
"(",
"default",
"=",
"self",
".",
"_default_domain",
")",
"if",
"domain",
"is",
"GENERIC",
":",
"raise",
"ValueError",
"(",
"\"Unable to determine domain for Pipeline.\\n\"",
"\"Pass domain=<desired domain> to your Pipeline to set a \"",
"\"domain.\"",
")",
"return",
"domain"
] | Resolve a concrete domain for ``pipeline``. | [
"Resolve",
"a",
"concrete",
"domain",
"for",
"pipeline",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L754-L764 |
25,970 | quantopian/zipline | zipline/utils/api_support.py | require_initialized | def require_initialized(exception):
"""
Decorator for API methods that should only be called after
TradingAlgorithm.initialize. `exception` will be raised if the method is
called before initialize has completed.
Examples
--------
@require_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed after initialize.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if not self.initialized:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator | python | def require_initialized(exception):
"""
Decorator for API methods that should only be called after
TradingAlgorithm.initialize. `exception` will be raised if the method is
called before initialize has completed.
Examples
--------
@require_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed after initialize.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if not self.initialized:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator | [
"def",
"require_initialized",
"(",
"exception",
")",
":",
"def",
"decorator",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"wrapped_method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"initialized",
":",
"raise",
"exception",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped_method",
"return",
"decorator"
] | Decorator for API methods that should only be called after
TradingAlgorithm.initialize. `exception` will be raised if the method is
called before initialize has completed.
Examples
--------
@require_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed after initialize. | [
"Decorator",
"for",
"API",
"methods",
"that",
"should",
"only",
"be",
"called",
"after",
"TradingAlgorithm",
".",
"initialize",
".",
"exception",
"will",
"be",
"raised",
"if",
"the",
"method",
"is",
"called",
"before",
"initialize",
"has",
"completed",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/api_support.py#L86-L105 |
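A self-contained demo of the `require_initialized` guard on a stub class (not the real TradingAlgorithm); the decorator body is copied from above so the snippet runs on its own:

```python
from functools import wraps

def require_initialized(exception):
    def decorator(method):
        @wraps(method)
        def wrapped_method(self, *args, **kwargs):
            if not self.initialized:
                raise exception
            return method(self, *args, **kwargs)
        return wrapped_method
    return decorator

class StubAlgo(object):
    initialized = False

    @require_initialized(RuntimeError("call initialize() first"))
    def order_target(self):
        return "ok"

algo = StubAlgo()
try:
    algo.order_target()
except RuntimeError as e:
    print(e)                # call initialize() first
algo.initialized = True
print(algo.order_target())  # ok
```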
25,971 | quantopian/zipline | zipline/utils/api_support.py | disallowed_in_before_trading_start | def disallowed_in_before_trading_start(exception):
"""
Decorator for API methods that cannot be called from within
TradingAlgorithm.before_trading_start. `exception` will be raised if the
method is called inside `before_trading_start`.
Examples
--------
@disallowed_in_before_trading_start(SomeException("Don't do that!"))
def method(self):
# Do stuff that is not allowed inside before_trading_start.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if self._in_before_trading_start:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator | python | def disallowed_in_before_trading_start(exception):
"""
Decorator for API methods that cannot be called from within
TradingAlgorithm.before_trading_start. `exception` will be raised if the
method is called inside `before_trading_start`.
Examples
--------
@disallowed_in_before_trading_start(SomeException("Don't do that!"))
def method(self):
# Do stuff that is not allowed inside before_trading_start.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if self._in_before_trading_start:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator | [
"def",
"disallowed_in_before_trading_start",
"(",
"exception",
")",
":",
"def",
"decorator",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"wrapped_method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_in_before_trading_start",
":",
"raise",
"exception",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped_method",
"return",
"decorator"
] | Decorator for API methods that cannot be called from within
TradingAlgorithm.before_trading_start. `exception` will be raised if the
method is called inside `before_trading_start`.
Examples
--------
@disallowed_in_before_trading_start(SomeException("Don't do that!"))
def method(self):
# Do stuff that is not allowed inside before_trading_start. | [
"Decorator",
"for",
"API",
"methods",
"that",
"cannot",
"be",
"called",
"from",
"within",
"TradingAlgorithm",
".",
"before_trading_start",
".",
"exception",
"will",
"be",
"raised",
"if",
"the",
"method",
"is",
"called",
"inside",
"before_trading_start",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/api_support.py#L108-L127 |
25,972 | quantopian/zipline | zipline/lib/normalize.py | naive_grouped_rowwise_apply | def naive_grouped_rowwise_apply(data,
group_labels,
func,
func_args=(),
out=None):
"""
Simple implementation of grouped row-wise function application.
Parameters
----------
data : ndarray[ndim=2]
Input array over which to apply a grouped function.
group_labels : ndarray[ndim=2, dtype=int64]
Labels to use to bucket inputs from array.
Should be the same shape as array.
func : function[ndarray[ndim=1]] -> ndarray[ndim=1]
Function to apply to pieces of each row in array.
func_args : tuple
Additional positional arguments to pass to ``func`` along with each row.
out : ndarray, optional
Array into which to write output. If not supplied, a new array of the
same shape as ``data`` is allocated and returned.
Examples
--------
>>> data = np.array([[1., 2., 3.],
... [2., 3., 4.],
... [5., 6., 7.]])
>>> labels = np.array([[0, 0, 1],
... [0, 1, 0],
... [1, 0, 2]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min())
array([[ 0., 1., 0.],
[ 0., 0., 2.],
[ 0., 0., 0.]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum())
array([[ 0.33333333, 0.66666667, 1. ],
[ 0.33333333, 1. , 0.66666667],
[ 1. , 1. , 1. ]])
"""
if out is None:
out = np.empty_like(data)
for (row, label_row, out_row) in zip(data, group_labels, out):
for label in np.unique(label_row):
locs = (label_row == label)
out_row[locs] = func(row[locs], *func_args)
return out | python | def naive_grouped_rowwise_apply(data,
group_labels,
func,
func_args=(),
out=None):
"""
Simple implementation of grouped row-wise function application.
Parameters
----------
data : ndarray[ndim=2]
Input array over which to apply a grouped function.
group_labels : ndarray[ndim=2, dtype=int64]
Labels to use to bucket inputs from array.
Should be the same shape as array.
func : function[ndarray[ndim=1]] -> ndarray[ndim=1]
Function to apply to pieces of each row in array.
func_args : tuple
Additional positional arguments to pass to ``func`` along with each row.
out : ndarray, optional
Array into which to write output. If not supplied, a new array of the
same shape as ``data`` is allocated and returned.
Examples
--------
>>> data = np.array([[1., 2., 3.],
... [2., 3., 4.],
... [5., 6., 7.]])
>>> labels = np.array([[0, 0, 1],
... [0, 1, 0],
... [1, 0, 2]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min())
array([[ 0., 1., 0.],
[ 0., 0., 2.],
[ 0., 0., 0.]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum())
array([[ 0.33333333, 0.66666667, 1. ],
[ 0.33333333, 1. , 0.66666667],
[ 1. , 1. , 1. ]])
"""
if out is None:
out = np.empty_like(data)
for (row, label_row, out_row) in zip(data, group_labels, out):
for label in np.unique(label_row):
locs = (label_row == label)
out_row[locs] = func(row[locs], *func_args)
return out | [
"def",
"naive_grouped_rowwise_apply",
"(",
"data",
",",
"group_labels",
",",
"func",
",",
"func_args",
"=",
"(",
")",
",",
"out",
"=",
"None",
")",
":",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"np",
".",
"empty_like",
"(",
"data",
")",
"for",
"(",
"row",
",",
"label_row",
",",
"out_row",
")",
"in",
"zip",
"(",
"data",
",",
"group_labels",
",",
"out",
")",
":",
"for",
"label",
"in",
"np",
".",
"unique",
"(",
"label_row",
")",
":",
"locs",
"=",
"(",
"label_row",
"==",
"label",
")",
"out_row",
"[",
"locs",
"]",
"=",
"func",
"(",
"row",
"[",
"locs",
"]",
",",
"*",
"func_args",
")",
"return",
"out"
] | Simple implementation of grouped row-wise function application.
Parameters
----------
data : ndarray[ndim=2]
Input array over which to apply a grouped function.
group_labels : ndarray[ndim=2, dtype=int64]
Labels to use to bucket inputs from array.
Should be the same shape as array.
func : function[ndarray[ndim=1]] -> ndarray[ndim=1]
Function to apply to pieces of each row in array.
func_args : tuple
Additional positional arguments to pass to ``func`` along with each row.
out : ndarray, optional
Array into which to write output. If not supplied, a new array of the
same shape as ``data`` is allocated and returned.
Examples
--------
>>> data = np.array([[1., 2., 3.],
... [2., 3., 4.],
... [5., 6., 7.]])
>>> labels = np.array([[0, 0, 1],
... [0, 1, 0],
... [1, 0, 2]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min())
array([[ 0., 1., 0.],
[ 0., 0., 2.],
[ 0., 0., 0.]])
>>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum())
array([[ 0.33333333, 0.66666667, 1. ],
[ 0.33333333, 1. , 0.66666667],
[ 1. , 1. , 1. ]]) | [
"Simple",
"implementation",
"of",
"grouped",
"row",
"-",
"wise",
"function",
"application",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/normalize.py#L4-L51 |
25,973 | quantopian/zipline | zipline/assets/synthetic.py | make_rotating_equity_info | def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime,
exchange='TEST'):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': exchange,
},
index=range(num_assets),
) | python | def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime,
exchange='TEST'):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': exchange,
},
index=range(num_assets),
) | [
"def",
"make_rotating_equity_info",
"(",
"num_assets",
",",
"first_start",
",",
"frequency",
",",
"periods_between_starts",
",",
"asset_lifetime",
",",
"exchange",
"=",
"'TEST'",
")",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'symbol'",
":",
"[",
"chr",
"(",
"ord",
"(",
"'A'",
")",
"+",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"num_assets",
")",
"]",
",",
"# Start a new asset every `periods_between_starts` days.",
"'start_date'",
":",
"pd",
".",
"date_range",
"(",
"first_start",
",",
"freq",
"=",
"(",
"periods_between_starts",
"*",
"frequency",
")",
",",
"periods",
"=",
"num_assets",
",",
")",
",",
"# Each asset lasts for `asset_lifetime` days.",
"'end_date'",
":",
"pd",
".",
"date_range",
"(",
"first_start",
"+",
"(",
"asset_lifetime",
"*",
"frequency",
")",
",",
"freq",
"=",
"(",
"periods_between_starts",
"*",
"frequency",
")",
",",
"periods",
"=",
"num_assets",
",",
")",
",",
"'exchange'",
":",
"exchange",
",",
"}",
",",
"index",
"=",
"range",
"(",
"num_assets",
")",
",",
")"
] | Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets. | [
"Create",
"a",
"DataFrame",
"representing",
"lifetimes",
"of",
"assets",
"that",
"are",
"constantly",
"rotating",
"in",
"and",
"out",
"of",
"existence",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L11-L59 |
25,974 | quantopian/zipline | zipline/assets/synthetic.py | make_simple_equity_info | def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp
end_date : pd.Timestamp
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
) | python | def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp
end_date : pd.Timestamp
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
) | [
"def",
"make_simple_equity_info",
"(",
"sids",
",",
"start_date",
",",
"end_date",
",",
"symbols",
"=",
"None",
",",
"names",
"=",
"None",
",",
"exchange",
"=",
"'TEST'",
")",
":",
"num_assets",
"=",
"len",
"(",
"sids",
")",
"if",
"symbols",
"is",
"None",
":",
"symbols",
"=",
"list",
"(",
"ascii_uppercase",
"[",
":",
"num_assets",
"]",
")",
"else",
":",
"symbols",
"=",
"list",
"(",
"symbols",
")",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"[",
"str",
"(",
"s",
")",
"+",
"\" INC.\"",
"for",
"s",
"in",
"symbols",
"]",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'symbol'",
":",
"symbols",
",",
"'start_date'",
":",
"pd",
".",
"to_datetime",
"(",
"[",
"start_date",
"]",
"*",
"num_assets",
")",
",",
"'end_date'",
":",
"pd",
".",
"to_datetime",
"(",
"[",
"end_date",
"]",
"*",
"num_assets",
")",
",",
"'asset_name'",
":",
"list",
"(",
"names",
")",
",",
"'exchange'",
":",
"exchange",
",",
"}",
",",
"index",
"=",
"sids",
",",
"columns",
"=",
"(",
"'start_date'",
",",
"'end_date'",
",",
"'symbol'",
",",
"'exchange'",
",",
"'asset_name'",
",",
")",
",",
")"
] | Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp
end_date : pd.Timestamp
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets. | [
"Create",
"a",
"DataFrame",
"representing",
"assets",
"that",
"exist",
"for",
"the",
"full",
"duration",
"between",
"start_date",
"and",
"end_date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L62-L117 |
25,975 | quantopian/zipline | zipline/assets/synthetic.py | make_simple_multi_country_equity_info | def make_simple_multi_country_equity_info(countries_to_sids,
countries_to_exchanges,
start_date,
end_date):
"""Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`, from multiple countries.
"""
sids = []
symbols = []
exchanges = []
for country, country_sids in countries_to_sids.items():
exchange = countries_to_exchanges[country]
for i, sid in enumerate(country_sids):
sids.append(sid)
symbols.append('-'.join([country, str(i)]))
exchanges.append(exchange)
return pd.DataFrame(
{
'symbol': symbols,
'start_date': start_date,
'end_date': end_date,
'asset_name': symbols,
'exchange': exchanges,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
) | python | def make_simple_multi_country_equity_info(countries_to_sids,
countries_to_exchanges,
start_date,
end_date):
"""Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`, from multiple countries.
"""
sids = []
symbols = []
exchanges = []
for country, country_sids in countries_to_sids.items():
exchange = countries_to_exchanges[country]
for i, sid in enumerate(country_sids):
sids.append(sid)
symbols.append('-'.join([country, str(i)]))
exchanges.append(exchange)
return pd.DataFrame(
{
'symbol': symbols,
'start_date': start_date,
'end_date': end_date,
'asset_name': symbols,
'exchange': exchanges,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
) | [
"def",
"make_simple_multi_country_equity_info",
"(",
"countries_to_sids",
",",
"countries_to_exchanges",
",",
"start_date",
",",
"end_date",
")",
":",
"sids",
"=",
"[",
"]",
"symbols",
"=",
"[",
"]",
"exchanges",
"=",
"[",
"]",
"for",
"country",
",",
"country_sids",
"in",
"countries_to_sids",
".",
"items",
"(",
")",
":",
"exchange",
"=",
"countries_to_exchanges",
"[",
"country",
"]",
"for",
"i",
",",
"sid",
"in",
"enumerate",
"(",
"country_sids",
")",
":",
"sids",
".",
"append",
"(",
"sid",
")",
"symbols",
".",
"append",
"(",
"'-'",
".",
"join",
"(",
"[",
"country",
",",
"str",
"(",
"i",
")",
"]",
")",
")",
"exchanges",
".",
"append",
"(",
"exchange",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"{",
"'symbol'",
":",
"symbols",
",",
"'start_date'",
":",
"start_date",
",",
"'end_date'",
":",
"end_date",
",",
"'asset_name'",
":",
"symbols",
",",
"'exchange'",
":",
"exchanges",
",",
"}",
",",
"index",
"=",
"sids",
",",
"columns",
"=",
"(",
"'start_date'",
",",
"'end_date'",
",",
"'symbol'",
",",
"'exchange'",
",",
"'asset_name'",
",",
")",
",",
")"
] | Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`, from multiple countries. | [
"Create",
"a",
"DataFrame",
"representing",
"assets",
"that",
"exist",
"for",
"the",
"full",
"duration",
"between",
"start_date",
"and",
"end_date",
"from",
"multiple",
"countries",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L120-L154 |
25,976 | quantopian/zipline | zipline/assets/synthetic.py | make_jagged_equity_info | def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
Starting after the first end date, end each asset every
`frequency` * `periods_between_ends`.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame | python | def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
Starting after the first end date, end each asset every
`frequency` * `periods_between_ends`.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame | [
"def",
"make_jagged_equity_info",
"(",
"num_assets",
",",
"start_date",
",",
"first_end",
",",
"frequency",
",",
"periods_between_ends",
",",
"auto_close_delta",
")",
":",
"frame",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'symbol'",
":",
"[",
"chr",
"(",
"ord",
"(",
"'A'",
")",
"+",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"num_assets",
")",
"]",
",",
"'start_date'",
":",
"start_date",
",",
"'end_date'",
":",
"pd",
".",
"date_range",
"(",
"first_end",
",",
"freq",
"=",
"(",
"periods_between_ends",
"*",
"frequency",
")",
",",
"periods",
"=",
"num_assets",
",",
")",
",",
"'exchange'",
":",
"'TEST'",
",",
"}",
",",
"index",
"=",
"range",
"(",
"num_assets",
")",
",",
")",
"# Explicitly pass None to disable setting the auto_close_date column.",
"if",
"auto_close_delta",
"is",
"not",
"None",
":",
"frame",
"[",
"'auto_close_date'",
"]",
"=",
"frame",
"[",
"'end_date'",
"]",
"+",
"auto_close_delta",
"return",
"frame"
] | Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
Starting after the first end date, end each asset every
`frequency` * `periods_between_ends`.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets. | [
"Create",
"a",
"DataFrame",
"representing",
"assets",
"that",
"all",
"begin",
"at",
"the",
"same",
"start",
"date",
"but",
"have",
"cascading",
"end",
"dates",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L157-L204 |
25,977 | quantopian/zipline | zipline/assets/synthetic.py | make_future_info | def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None,
multiplier=500):
"""
Create a DataFrame representing futures for `root_symbols` during `years`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
start_date_func : (Timestamp) -> Timestamp, optional
Function to generate start dates from first of the month associated
with each asset month code. Defaults to a start_date one year prior
to the month_code date.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CMES_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01)
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
iteritems(month_codes),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
'expiration_date': expiration_date_func(month_begin),
'multiplier': multiplier,
'exchange': "TEST",
})
return pd.DataFrame.from_records(contracts, index='sid') | python | def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None,
multiplier=500):
"""
Create a DataFrame representing futures for `root_symbols` during `years`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
start_date_func : (Timestamp) -> Timestamp, optional
Function to generate start dates from first of the month associated
with each asset month code. Defaults to a start_date one year prior
to the month_code date.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CMES_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01)
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
iteritems(month_codes),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
'expiration_date': expiration_date_func(month_begin),
'multiplier': multiplier,
'exchange': "TEST",
})
return pd.DataFrame.from_records(contracts, index='sid') | [
"def",
"make_future_info",
"(",
"first_sid",
",",
"root_symbols",
",",
"years",
",",
"notice_date_func",
",",
"expiration_date_func",
",",
"start_date_func",
",",
"month_codes",
"=",
"None",
",",
"multiplier",
"=",
"500",
")",
":",
"if",
"month_codes",
"is",
"None",
":",
"month_codes",
"=",
"CMES_CODE_TO_MONTH",
"year_strs",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"years",
")",
")",
"years",
"=",
"[",
"pd",
".",
"Timestamp",
"(",
"s",
",",
"tz",
"=",
"'UTC'",
")",
"for",
"s",
"in",
"year_strs",
"]",
"# Pairs of string/date like ('K06', 2006-05-01)",
"contract_suffix_to_beginning_of_month",
"=",
"tuple",
"(",
"(",
"month_code",
"+",
"year_str",
"[",
"-",
"2",
":",
"]",
",",
"year",
"+",
"MonthBegin",
"(",
"month_num",
")",
")",
"for",
"(",
"(",
"year",
",",
"year_str",
")",
",",
"(",
"month_code",
",",
"month_num",
")",
")",
"in",
"product",
"(",
"zip",
"(",
"years",
",",
"year_strs",
")",
",",
"iteritems",
"(",
"month_codes",
")",
",",
")",
")",
"contracts",
"=",
"[",
"]",
"parts",
"=",
"product",
"(",
"root_symbols",
",",
"contract_suffix_to_beginning_of_month",
")",
"for",
"sid",
",",
"(",
"root_sym",
",",
"(",
"suffix",
",",
"month_begin",
")",
")",
"in",
"enumerate",
"(",
"parts",
",",
"first_sid",
")",
":",
"contracts",
".",
"append",
"(",
"{",
"'sid'",
":",
"sid",
",",
"'root_symbol'",
":",
"root_sym",
",",
"'symbol'",
":",
"root_sym",
"+",
"suffix",
",",
"'start_date'",
":",
"start_date_func",
"(",
"month_begin",
")",
",",
"'notice_date'",
":",
"notice_date_func",
"(",
"month_begin",
")",
",",
"'expiration_date'",
":",
"notice_date_func",
"(",
"month_begin",
")",
",",
"'multiplier'",
":",
"multiplier",
",",
"'exchange'",
":",
"\"TEST\"",
",",
"}",
")",
"return",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"contracts",
",",
"index",
"=",
"'sid'",
")"
] | Create a DataFrame representing futures for `root_symbols` during `years`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
start_date_func : (Timestamp) -> Timestamp, optional
Function to generate start dates from first of the month associated
with each asset month code. Defaults to a start_date one year prior
to the month_code date.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter. | [
"Create",
"a",
"DataFrame",
"representing",
"futures",
"for",
"root_symbols",
"during",
"year",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L207-L281 |
25,978 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier.startswith | def startswith(self, prefix):
"""
Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.startswith,
opargs=(prefix,),
) | python | def startswith(self, prefix):
"""
Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.startswith,
opargs=(prefix,),
) | [
"def",
"startswith",
"(",
"self",
",",
"prefix",
")",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"LabelArray",
".",
"startswith",
",",
"opargs",
"=",
"(",
"prefix",
",",
")",
",",
")"
] | Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``. | [
"Construct",
"a",
"Filter",
"matching",
"values",
"starting",
"with",
"prefix",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L150-L169 |
25,979 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier.endswith | def endswith(self, suffix):
"""
Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string ending with ``suffix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.endswith,
opargs=(suffix,),
) | python | def endswith(self, suffix):
"""
Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string ending with ``suffix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.endswith,
opargs=(suffix,),
) | [
"def",
"endswith",
"(",
"self",
",",
"suffix",
")",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"LabelArray",
".",
"endswith",
",",
"opargs",
"=",
"(",
"suffix",
",",
")",
",",
")"
] | Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string ending with ``prefix``. | [
"Construct",
"a",
"Filter",
"matching",
"values",
"ending",
"with",
"suffix",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L173-L192 |
25,980 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier.has_substring | def has_substring(self, substring):
"""
Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.has_substring,
opargs=(substring,),
) | python | def has_substring(self, substring):
"""
Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.has_substring,
opargs=(substring,),
) | [
"def",
"has_substring",
"(",
"self",
",",
"substring",
")",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"LabelArray",
".",
"has_substring",
",",
"opargs",
"=",
"(",
"substring",
",",
")",
",",
")"
] | Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``. | [
"Construct",
"a",
"Filter",
"matching",
"values",
"containing",
"substring",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L196-L215 |
25,981 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier.matches | def matches(self, pattern):
"""
Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>`
"""
return ArrayPredicate(
term=self,
op=LabelArray.matches,
opargs=(pattern,),
) | python | def matches(self, pattern):
"""
Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>`
"""
return ArrayPredicate(
term=self,
op=LabelArray.matches,
opargs=(pattern,),
) | [
"def",
"matches",
"(",
"self",
",",
"pattern",
")",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"LabelArray",
".",
"matches",
",",
"opargs",
"=",
"(",
"pattern",
",",
")",
",",
")"
] | Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>` | [
"Construct",
"a",
"Filter",
"that",
"checks",
"regex",
"matches",
"against",
"pattern",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L219-L242 |
25,982 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier.element_of | def element_of(self, choices):
"""
Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``.
"""
try:
choices = frozenset(choices)
except Exception as e:
raise TypeError(
"Expected `choices` to be an iterable of hashable values,"
" but got {} instead.\n"
"This caused the following error: {!r}.".format(choices, e)
)
if self.missing_value in choices:
raise ValueError(
"Found self.missing_value ({mv!r}) in choices supplied to"
" {typename}.{meth_name}().\n"
"Missing values have NaN semantics, so the"
" requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {choices}.".format(
mv=self.missing_value,
typename=(type(self).__name__),
choices=sorted(choices),
meth_name=self.element_of.__name__,
)
)
def only_contains(type_, values):
return all(isinstance(v, type_) for v in values)
if self.dtype == int64_dtype:
if only_contains(int, choices):
return ArrayPredicate(
term=self,
op=vectorized_is_element,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-int in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
elif self.dtype == categorical_dtype:
if only_contains((bytes, unicode), choices):
return ArrayPredicate(
term=self,
op=LabelArray.element_of,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-string in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype | python | def element_of(self, choices):
"""
Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``.
"""
try:
choices = frozenset(choices)
except Exception as e:
raise TypeError(
"Expected `choices` to be an iterable of hashable values,"
" but got {} instead.\n"
"This caused the following error: {!r}.".format(choices, e)
)
if self.missing_value in choices:
raise ValueError(
"Found self.missing_value ({mv!r}) in choices supplied to"
" {typename}.{meth_name}().\n"
"Missing values have NaN semantics, so the"
" requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {choices}.".format(
mv=self.missing_value,
typename=(type(self).__name__),
choices=sorted(choices),
meth_name=self.element_of.__name__,
)
)
def only_contains(type_, values):
return all(isinstance(v, type_) for v in values)
if self.dtype == int64_dtype:
if only_contains(int, choices):
return ArrayPredicate(
term=self,
op=vectorized_is_element,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-int in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
elif self.dtype == categorical_dtype:
if only_contains((bytes, unicode), choices):
return ArrayPredicate(
term=self,
op=LabelArray.element_of,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-string in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype | [
"def",
"element_of",
"(",
"self",
",",
"choices",
")",
":",
"try",
":",
"choices",
"=",
"frozenset",
"(",
"choices",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"\"Expected `choices` to be an iterable of hashable values,\"",
"\" but got {} instead.\\n\"",
"\"This caused the following error: {!r}.\"",
".",
"format",
"(",
"choices",
",",
"e",
")",
")",
"if",
"self",
".",
"missing_value",
"in",
"choices",
":",
"raise",
"ValueError",
"(",
"\"Found self.missing_value ({mv!r}) in choices supplied to\"",
"\" {typename}.{meth_name}().\\n\"",
"\"Missing values have NaN semantics, so the\"",
"\" requested comparison would always produce False.\\n\"",
"\"Use the isnull() method to check for missing values.\\n\"",
"\"Received choices were {choices}.\"",
".",
"format",
"(",
"mv",
"=",
"self",
".",
"missing_value",
",",
"typename",
"=",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
")",
",",
"choices",
"=",
"sorted",
"(",
"choices",
")",
",",
"meth_name",
"=",
"self",
".",
"element_of",
".",
"__name__",
",",
")",
")",
"def",
"only_contains",
"(",
"type_",
",",
"values",
")",
":",
"return",
"all",
"(",
"isinstance",
"(",
"v",
",",
"type_",
")",
"for",
"v",
"in",
"values",
")",
"if",
"self",
".",
"dtype",
"==",
"int64_dtype",
":",
"if",
"only_contains",
"(",
"int",
",",
"choices",
")",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"vectorized_is_element",
",",
"opargs",
"=",
"(",
"choices",
",",
")",
",",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Found non-int in choices for {typename}.element_of.\\n\"",
"\"Supplied choices were {choices}.\"",
".",
"format",
"(",
"typename",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"choices",
"=",
"choices",
",",
")",
")",
"elif",
"self",
".",
"dtype",
"==",
"categorical_dtype",
":",
"if",
"only_contains",
"(",
"(",
"bytes",
",",
"unicode",
")",
",",
"choices",
")",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"LabelArray",
".",
"element_of",
",",
"opargs",
"=",
"(",
"choices",
",",
")",
",",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Found non-string in choices for {typename}.element_of.\\n\"",
"\"Supplied choices were {choices}.\"",
".",
"format",
"(",
"typename",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"choices",
"=",
"choices",
",",
")",
")",
"assert",
"False",
",",
"\"Unknown dtype in Classifier.element_of %s.\"",
"%",
"self",
".",
"dtype"
] | Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``. | [
"Construct",
"a",
"Filter",
"indicating",
"whether",
"values",
"are",
"in",
"choices",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L264-L336 |
25,983 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier.to_workspace_value | def to_workspace_value(self, result, assets):
"""
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
"""
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(result.values, pd.Categorical), (
'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
) | python | def to_workspace_value(self, result, assets):
"""
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
"""
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(result.values, pd.Categorical), (
'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
) | [
"def",
"to_workspace_value",
"(",
"self",
",",
"result",
",",
"assets",
")",
":",
"if",
"self",
".",
"dtype",
"==",
"int64_dtype",
":",
"return",
"super",
"(",
"Classifier",
",",
"self",
")",
".",
"to_workspace_value",
"(",
"result",
",",
"assets",
")",
"assert",
"isinstance",
"(",
"result",
".",
"values",
",",
"pd",
".",
"Categorical",
")",
",",
"(",
"'Expected a Categorical, got %r.'",
"%",
"type",
"(",
"result",
".",
"values",
")",
")",
"with_missing",
"=",
"pd",
".",
"Series",
"(",
"data",
"=",
"pd",
".",
"Categorical",
"(",
"result",
".",
"values",
",",
"result",
".",
"values",
".",
"categories",
".",
"union",
"(",
"[",
"self",
".",
"missing_value",
"]",
")",
",",
")",
",",
"index",
"=",
"result",
".",
"index",
",",
")",
"return",
"LabelArray",
"(",
"super",
"(",
"Classifier",
",",
"self",
")",
".",
"to_workspace_value",
"(",
"with_missing",
",",
"assets",
",",
")",
",",
"self",
".",
"missing_value",
",",
")"
] | Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`. | [
"Called",
"with",
"the",
"result",
"of",
"a",
"pipeline",
".",
"This",
"needs",
"to",
"return",
"an",
"object",
"which",
"can",
"be",
"put",
"into",
"the",
"workspace",
"to",
"continue",
"doing",
"computations",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L345-L371 |
25,984 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | Classifier._to_integral | def _to_integral(self, output_array):
"""
Convert an array produced by this classifier into an array of integer
labels and a missing value label.
"""
if self.dtype == int64_dtype:
group_labels = output_array
null_label = self.missing_value
elif self.dtype == categorical_dtype:
# Coerce LabelArray into an isomorphic array of ints. This is
# necessary because np.where doesn't know about LabelArrays or the
# void dtype.
group_labels = output_array.as_int_array()
null_label = output_array.missing_value_code
else:
raise AssertionError(
"Unexpected Classifier dtype: %s." % self.dtype
)
return group_labels, null_label | python | def _to_integral(self, output_array):
"""
Convert an array produced by this classifier into an array of integer
labels and a missing value label.
"""
if self.dtype == int64_dtype:
group_labels = output_array
null_label = self.missing_value
elif self.dtype == categorical_dtype:
# Coerce LabelArray into an isomorphic array of ints. This is
# necessary because np.where doesn't know about LabelArrays or the
# void dtype.
group_labels = output_array.as_int_array()
null_label = output_array.missing_value_code
else:
raise AssertionError(
"Unexpected Classifier dtype: %s." % self.dtype
)
return group_labels, null_label | [
"def",
"_to_integral",
"(",
"self",
",",
"output_array",
")",
":",
"if",
"self",
".",
"dtype",
"==",
"int64_dtype",
":",
"group_labels",
"=",
"output_array",
"null_label",
"=",
"self",
".",
"missing_value",
"elif",
"self",
".",
"dtype",
"==",
"categorical_dtype",
":",
"# Coerce LabelArray into an isomorphic array of ints. This is",
"# necessary because np.where doesn't know about LabelArrays or the",
"# void dtype.",
"group_labels",
"=",
"output_array",
".",
"as_int_array",
"(",
")",
"null_label",
"=",
"output_array",
".",
"missing_value_code",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Unexpected Classifier dtype: %s.\"",
"%",
"self",
".",
"dtype",
")",
"return",
"group_labels",
",",
"null_label"
] | Convert an array produced by this classifier into an array of integer
labels and a missing value label. | [
"Convert",
"an",
"array",
"produced",
"by",
"this",
"classifier",
"into",
"an",
"array",
"of",
"integer",
"labels",
"and",
"a",
"missing",
"value",
"label",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L381-L399 |
25,985 | quantopian/zipline | zipline/pipeline/classifiers/classifier.py | CustomClassifier._allocate_output | def _allocate_output(self, windows, shape):
"""
Override the default array allocation to produce a LabelArray when we
have a string-like dtype.
"""
if self.dtype == int64_dtype:
return super(CustomClassifier, self)._allocate_output(
windows,
shape,
)
# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape) | python | def _allocate_output(self, windows, shape):
"""
Override the default array allocation to produce a LabelArray when we
have a string-like dtype.
"""
if self.dtype == int64_dtype:
return super(CustomClassifier, self)._allocate_output(
windows,
shape,
)
# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape) | [
"def",
"_allocate_output",
"(",
"self",
",",
"windows",
",",
"shape",
")",
":",
"if",
"self",
".",
"dtype",
"==",
"int64_dtype",
":",
"return",
"super",
"(",
"CustomClassifier",
",",
"self",
")",
".",
"_allocate_output",
"(",
"windows",
",",
"shape",
",",
")",
"# This is a little bit of a hack. We might not know what the",
"# categories for a LabelArray are until it's actually been loaded, so",
"# we need to look at the underlying data.",
"return",
"windows",
"[",
"0",
"]",
".",
"data",
".",
"empty_like",
"(",
"shape",
")"
] | Override the default array allocation to produce a LabelArray when we
have a string-like dtype. | [
"Override",
"the",
"default",
"array",
"allocation",
"to",
"produce",
"a",
"LabelArray",
"when",
"we",
"have",
"a",
"string",
"-",
"like",
"dtype",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L517-L531 |
25,986 | quantopian/zipline | zipline/utils/input_validation.py | verify_indices_all_unique | def verify_indices_all_unique(obj):
"""
Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If any axis has duplicate entries.
"""
axis_names = [
('index',), # Series
('index', 'columns'), # DataFrame
('items', 'major_axis', 'minor_axis') # Panel
][obj.ndim - 1] # ndim = 1 should go to entry 0,
for axis_name, index in zip(axis_names, obj.axes):
if index.is_unique:
continue
raise ValueError(
"Duplicate entries in {type}.{axis}: {dupes}.".format(
type=type(obj).__name__,
axis=axis_name,
dupes=sorted(index[index.duplicated()]),
)
)
return obj | python | def verify_indices_all_unique(obj):
"""
Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If any axis has duplicate entries.
"""
axis_names = [
('index',), # Series
('index', 'columns'), # DataFrame
('items', 'major_axis', 'minor_axis') # Panel
][obj.ndim - 1] # ndim = 1 should go to entry 0,
for axis_name, index in zip(axis_names, obj.axes):
if index.is_unique:
continue
raise ValueError(
"Duplicate entries in {type}.{axis}: {dupes}.".format(
type=type(obj).__name__,
axis=axis_name,
dupes=sorted(index[index.duplicated()]),
)
)
return obj | [
"def",
"verify_indices_all_unique",
"(",
"obj",
")",
":",
"axis_names",
"=",
"[",
"(",
"'index'",
",",
")",
",",
"# Series",
"(",
"'index'",
",",
"'columns'",
")",
",",
"# DataFrame",
"(",
"'items'",
",",
"'major_axis'",
",",
"'minor_axis'",
")",
"# Panel",
"]",
"[",
"obj",
".",
"ndim",
"-",
"1",
"]",
"# ndim = 1 should go to entry 0,",
"for",
"axis_name",
",",
"index",
"in",
"zip",
"(",
"axis_names",
",",
"obj",
".",
"axes",
")",
":",
"if",
"index",
".",
"is_unique",
":",
"continue",
"raise",
"ValueError",
"(",
"\"Duplicate entries in {type}.{axis}: {dupes}.\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"obj",
")",
".",
"__name__",
",",
"axis",
"=",
"axis_name",
",",
"dupes",
"=",
"sorted",
"(",
"index",
"[",
"index",
".",
"duplicated",
"(",
")",
"]",
")",
",",
")",
")",
"return",
"obj"
] | Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If any axis has duplicate entries. | [
"Check",
"that",
"all",
"axes",
"of",
"a",
"pandas",
"object",
"are",
"unique",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L50-L86 |
25,987 | quantopian/zipline | zipline/utils/input_validation.py | optionally | def optionally(preprocessor):
"""Modify a preprocessor to explicitly allow `None`.
Parameters
----------
preprocessor : callable[callable, str, any -> any]
A preprocessor to delegate to when `arg is not None`.
Returns
-------
optional_preprocessor : callable[callable, str, any -> any]
A preprocessor that delegates to `preprocessor` when `arg is not None`.
Examples
--------
>>> def preprocessor(func, argname, arg):
... if not isinstance(arg, int):
... raise TypeError('arg must be int')
... return arg
...
>>> @preprocess(a=optionally(preprocessor))
... def f(a):
... return a
...
>>> f(1) # call with int
1
>>> f('a') # call with not int
Traceback (most recent call last):
...
TypeError: arg must be int
>>> f(None) is None # call with explicit None
True
"""
@wraps(preprocessor)
def wrapper(func, argname, arg):
return arg if arg is None else preprocessor(func, argname, arg)
return wrapper | python | def optionally(preprocessor):
"""Modify a preprocessor to explicitly allow `None`.
Parameters
----------
preprocessor : callable[callable, str, any -> any]
A preprocessor to delegate to when `arg is not None`.
Returns
-------
optional_preprocessor : callable[callable, str, any -> any]
A preprocessor that delegates to `preprocessor` when `arg is not None`.
Examples
--------
>>> def preprocessor(func, argname, arg):
... if not isinstance(arg, int):
... raise TypeError('arg must be int')
... return arg
...
>>> @preprocess(a=optionally(preprocessor))
... def f(a):
... return a
...
>>> f(1) # call with int
1
>>> f('a') # call with not int
Traceback (most recent call last):
...
TypeError: arg must be int
>>> f(None) is None # call with explicit None
True
"""
@wraps(preprocessor)
def wrapper(func, argname, arg):
return arg if arg is None else preprocessor(func, argname, arg)
return wrapper | [
"def",
"optionally",
"(",
"preprocessor",
")",
":",
"@",
"wraps",
"(",
"preprocessor",
")",
"def",
"wrapper",
"(",
"func",
",",
"argname",
",",
"arg",
")",
":",
"return",
"arg",
"if",
"arg",
"is",
"None",
"else",
"preprocessor",
"(",
"func",
",",
"argname",
",",
"arg",
")",
"return",
"wrapper"
] | Modify a preprocessor to explicitly allow `None`.
Parameters
----------
preprocessor : callable[callable, str, any -> any]
A preprocessor to delegate to when `arg is not None`.
Returns
-------
optional_preprocessor : callable[callable, str, any -> any]
A preprocessor that delegates to `preprocessor` when `arg is not None`.
Examples
--------
>>> def preprocessor(func, argname, arg):
... if not isinstance(arg, int):
... raise TypeError('arg must be int')
... return arg
...
>>> @preprocess(a=optionally(preprocessor))
... def f(a):
... return a
...
>>> f(1) # call with int
1
>>> f('a') # call with not int
Traceback (most recent call last):
...
TypeError: arg must be int
>>> f(None) is None # call with explicit None
True | [
"Modify",
"a",
"preprocessor",
"to",
"explicitly",
"allow",
"None",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L89-L126 |
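A short sketch composing `optionally` with `ensure_dtype` (documented in the next record) so that None passes through while other values are coerced; illustrative only, assumes zipline is installed:

from zipline.utils.input_validation import ensure_dtype, optionally
from zipline.utils.preprocess import preprocess

@preprocess(dtype=optionally(ensure_dtype))
def foo(dtype=None):
    return dtype

print(foo(float))  # dtype('float64') -- coerced by ensure_dtype
print(foo(None))   # None -- passed through untouched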
25,988 | quantopian/zipline | zipline/utils/input_validation.py | ensure_dtype | def ensure_dtype(func, argname, arg):
"""
Argument preprocessor that converts the input into a numpy dtype.
Examples
--------
>>> import numpy as np
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(dtype=ensure_dtype)
... def foo(dtype):
... return dtype
...
>>> foo(float)
dtype('float64')
"""
try:
return dtype(arg)
except TypeError:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a numpy dtype.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
) | python | def ensure_dtype(func, argname, arg):
"""
Argument preprocessor that converts the input into a numpy dtype.
Examples
--------
>>> import numpy as np
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(dtype=ensure_dtype)
... def foo(dtype):
... return dtype
...
>>> foo(float)
dtype('float64')
"""
try:
return dtype(arg)
except TypeError:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a numpy dtype.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
) | [
"def",
"ensure_dtype",
"(",
"func",
",",
"argname",
",",
"arg",
")",
":",
"try",
":",
"return",
"dtype",
"(",
"arg",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"{func}() couldn't convert argument \"",
"\"{argname}={arg!r} to a numpy dtype.\"",
".",
"format",
"(",
"func",
"=",
"_qualified_name",
"(",
"func",
")",
",",
"argname",
"=",
"argname",
",",
"arg",
"=",
"arg",
",",
")",
",",
")"
] | Argument preprocessor that converts the input into a numpy dtype.
Examples
--------
>>> import numpy as np
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(dtype=ensure_dtype)
... def foo(dtype):
... return dtype
...
>>> foo(float)
dtype('float64') | [
"Argument",
"preprocessor",
"that",
"converts",
"the",
"input",
"into",
"a",
"numpy",
"dtype",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L143-L168 |
25,989 | quantopian/zipline | zipline/utils/input_validation.py | ensure_timezone | def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, string_types):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
) | python | def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, string_types):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
) | [
"def",
"ensure_timezone",
"(",
"func",
",",
"argname",
",",
"arg",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"tzinfo",
")",
":",
"return",
"arg",
"if",
"isinstance",
"(",
"arg",
",",
"string_types",
")",
":",
"return",
"timezone",
"(",
"arg",
")",
"raise",
"TypeError",
"(",
"\"{func}() couldn't convert argument \"",
"\"{argname}={arg!r} to a timezone.\"",
".",
"format",
"(",
"func",
"=",
"_qualified_name",
"(",
"func",
")",
",",
"argname",
"=",
"argname",
",",
"arg",
"=",
"arg",
",",
")",
",",
")"
] | Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC> | [
"Argument",
"preprocessor",
"that",
"converts",
"the",
"input",
"into",
"a",
"tzinfo",
"object",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L171-L195 |
25,990 | quantopian/zipline | zipline/utils/input_validation.py | ensure_timestamp | def ensure_timestamp(func, argname, arg):
"""Argument preprocessor that converts the input into a pandas Timestamp
object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(ts=ensure_timestamp)
... def foo(ts):
... return ts
>>> foo('2014-01-01')
Timestamp('2014-01-01 00:00:00')
"""
try:
return pd.Timestamp(arg)
except ValueError as e:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a pandas Timestamp.\n"
"Original error was: {t}: {e}".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
t=_qualified_name(type(e)),
e=e,
),
) | python | def ensure_timestamp(func, argname, arg):
"""Argument preprocessor that converts the input into a pandas Timestamp
object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(ts=ensure_timestamp)
... def foo(ts):
... return ts
>>> foo('2014-01-01')
Timestamp('2014-01-01 00:00:00')
"""
try:
return pd.Timestamp(arg)
except ValueError as e:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a pandas Timestamp.\n"
"Original error was: {t}: {e}".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
t=_qualified_name(type(e)),
e=e,
),
) | [
"def",
"ensure_timestamp",
"(",
"func",
",",
"argname",
",",
"arg",
")",
":",
"try",
":",
"return",
"pd",
".",
"Timestamp",
"(",
"arg",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"\"{func}() couldn't convert argument \"",
"\"{argname}={arg!r} to a pandas Timestamp.\\n\"",
"\"Original error was: {t}: {e}\"",
".",
"format",
"(",
"func",
"=",
"_qualified_name",
"(",
"func",
")",
",",
"argname",
"=",
"argname",
",",
"arg",
"=",
"arg",
",",
"t",
"=",
"_qualified_name",
"(",
"type",
"(",
"e",
")",
")",
",",
"e",
"=",
"e",
",",
")",
",",
")"
] | Argument preprocessor that converts the input into a pandas Timestamp
object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(ts=ensure_timestamp)
... def foo(ts):
... return ts
>>> foo('2014-01-01')
Timestamp('2014-01-01 00:00:00') | [
"Argument",
"preprocessor",
"that",
"converts",
"the",
"input",
"into",
"a",
"pandas",
"Timestamp",
"object",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L198-L224 |
25,991 | quantopian/zipline | zipline/utils/input_validation.py | expect_dtypes | def expect_dtypes(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected numpy dtypes.
Examples
--------
>>> from numpy import dtype, arange, int8, float64
>>> @expect_dtypes(x=dtype(int8))
... def foo(x, y):
... return x, y
...
>>> foo(arange(3, dtype=int8), 'foo')
(array([0, 1, 2], dtype=int8), 'foo')
>>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
but got 'float64' instead.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (dtype, tuple)):
raise TypeError(
"expect_dtypes() expected a numpy dtype or tuple of dtypes"
" for argument {name!r}, but got {dtype} instead.".format(
name=name, dtype=type_,
)
)
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
@preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_dtype(dtypes):
"""
Factory for dtype-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# name. Otherwise just show the value.
try:
value_to_show = value.dtype.name
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a value with dtype {dtype_str} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=get_funcname(func),
dtype_str=' or '.join(repr(d.name) for d in dtypes),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattr(argvalue, 'dtype', object()) not in dtypes:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_dtype, named)) | python | def expect_dtypes(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected numpy dtypes.
Examples
--------
>>> from numpy import dtype, arange, int8, float64
>>> @expect_dtypes(x=dtype(int8))
... def foo(x, y):
... return x, y
...
>>> foo(arange(3, dtype=int8), 'foo')
(array([0, 1, 2], dtype=int8), 'foo')
>>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
but got 'float64' instead.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (dtype, tuple)):
raise TypeError(
"expect_dtypes() expected a numpy dtype or tuple of dtypes"
" for argument {name!r}, but got {dtype} instead.".format(
name=name, dtype=type_,
)
)
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
@preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_dtype(dtypes):
"""
Factory for dtype-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# name. Otherwise just show the value.
try:
value_to_show = value.dtype.name
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a value with dtype {dtype_str} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=get_funcname(func),
dtype_str=' or '.join(repr(d.name) for d in dtypes),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattr(argvalue, 'dtype', object()) not in dtypes:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_dtype, named)) | [
"def",
"expect_dtypes",
"(",
"__funcname",
"=",
"_qualified_name",
",",
"*",
"*",
"named",
")",
":",
"for",
"name",
",",
"type_",
"in",
"iteritems",
"(",
"named",
")",
":",
"if",
"not",
"isinstance",
"(",
"type_",
",",
"(",
"dtype",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"expect_dtypes() expected a numpy dtype or tuple of dtypes\"",
"\" for argument {name!r}, but got {dtype} instead.\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"dtype",
"=",
"dtype",
",",
")",
")",
"if",
"isinstance",
"(",
"__funcname",
",",
"str",
")",
":",
"def",
"get_funcname",
"(",
"_",
")",
":",
"return",
"__funcname",
"else",
":",
"get_funcname",
"=",
"__funcname",
"@",
"preprocess",
"(",
"dtypes",
"=",
"call",
"(",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"x",
",",
"tuple",
")",
"else",
"(",
"x",
",",
")",
")",
")",
"def",
"_expect_dtype",
"(",
"dtypes",
")",
":",
"\"\"\"\n Factory for dtype-checking functions that work with the @preprocess\n decorator.\n \"\"\"",
"def",
"error_message",
"(",
"func",
",",
"argname",
",",
"value",
")",
":",
"# If the bad value has a dtype, but it's wrong, show the dtype",
"# name. Otherwise just show the value.",
"try",
":",
"value_to_show",
"=",
"value",
".",
"dtype",
".",
"name",
"except",
"AttributeError",
":",
"value_to_show",
"=",
"value",
"return",
"(",
"\"{funcname}() expected a value with dtype {dtype_str} \"",
"\"for argument {argname!r}, but got {value!r} instead.\"",
")",
".",
"format",
"(",
"funcname",
"=",
"get_funcname",
"(",
"func",
")",
",",
"dtype_str",
"=",
"' or '",
".",
"join",
"(",
"repr",
"(",
"d",
".",
"name",
")",
"for",
"d",
"in",
"dtypes",
")",
",",
"argname",
"=",
"argname",
",",
"value",
"=",
"value_to_show",
",",
")",
"def",
"_actual_preprocessor",
"(",
"func",
",",
"argname",
",",
"argvalue",
")",
":",
"if",
"getattr",
"(",
"argvalue",
",",
"'dtype'",
",",
"object",
"(",
")",
")",
"not",
"in",
"dtypes",
":",
"raise",
"TypeError",
"(",
"error_message",
"(",
"func",
",",
"argname",
",",
"argvalue",
")",
")",
"return",
"argvalue",
"return",
"_actual_preprocessor",
"return",
"preprocess",
"(",
"*",
"*",
"valmap",
"(",
"_expect_dtype",
",",
"named",
")",
")"
] | Preprocessing decorator that verifies inputs have expected numpy dtypes.
Examples
--------
>>> from numpy import dtype, arange, int8, float64
>>> @expect_dtypes(x=dtype(int8))
... def foo(x, y):
... return x, y
...
>>> foo(arange(3, dtype=int8), 'foo')
(array([0, 1, 2], dtype=int8), 'foo')
>>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
but got 'float64' instead. | [
"Preprocessing",
"decorator",
"that",
"verifies",
"inputs",
"have",
"expected",
"numpy",
"dtypes",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L227-L292 |
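The record's docstring only shows a single dtype, but the validation loop also accepts a tuple of dtypes per argument, allowing any of them; an illustrative sketch (assumes zipline is installed):

import numpy as np
from zipline.utils.input_validation import expect_dtypes

@expect_dtypes(x=(np.dtype('int8'), np.dtype('float64')))
def foo(x):
    return x

foo(np.arange(3, dtype='int8'))  # listed dtype passes
foo(np.arange(3.0))              # float64 also passes
try:
    foo(np.arange(3, dtype='int64'))
except TypeError as e:
    print(e)  # ...expected a value with dtype 'int8' or 'float64'...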
25,992 | quantopian/zipline | zipline/utils/input_validation.py | expect_kinds | def expect_kinds(**named):
"""
Preprocessing decorator that verifies inputs have expected dtype kinds.
Examples
--------
>>> from numpy import int64, int32, float32
>>> @expect_kinds(x='i')
... def foo(x):
... return x
...
>>> foo(int64(2))
2
>>> foo(int32(2))
2
>>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
but got 'f' instead.
"""
for name, kind in iteritems(named):
if not isinstance(kind, (str, tuple)):
raise TypeError(
"expect_dtype_kinds() expected a string or tuple of strings"
" for argument {name!r}, but got {kind} instead.".format(
name=name, kind=dtype,
)
)
@preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_kind(kinds):
"""
Factory for kind-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# kind. Otherwise just show the value.
try:
value_to_show = value.dtype.kind
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a numpy object of kind {kinds} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=_qualified_name(func),
kinds=' or '.join(map(repr, kinds)),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_kind, named)) | python | def expect_kinds(**named):
"""
Preprocessing decorator that verifies inputs have expected dtype kinds.
Examples
--------
>>> from numpy import int64, int32, float32
>>> @expect_kinds(x='i')
... def foo(x):
... return x
...
>>> foo(int64(2))
2
>>> foo(int32(2))
2
>>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
but got 'f' instead.
"""
for name, kind in iteritems(named):
if not isinstance(kind, (str, tuple)):
raise TypeError(
"expect_dtype_kinds() expected a string or tuple of strings"
" for argument {name!r}, but got {kind} instead.".format(
name=name, kind=dtype,
)
)
@preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_kind(kinds):
"""
Factory for kind-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# kind. Otherwise just show the value.
try:
value_to_show = value.dtype.kind
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a numpy object of kind {kinds} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=_qualified_name(func),
kinds=' or '.join(map(repr, kinds)),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_kind, named)) | [
"def",
"expect_kinds",
"(",
"*",
"*",
"named",
")",
":",
"for",
"name",
",",
"kind",
"in",
"iteritems",
"(",
"named",
")",
":",
"if",
"not",
"isinstance",
"(",
"kind",
",",
"(",
"str",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"expect_dtype_kinds() expected a string or tuple of strings\"",
"\" for argument {name!r}, but got {kind} instead.\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"kind",
"=",
"dtype",
",",
")",
")",
"@",
"preprocess",
"(",
"kinds",
"=",
"call",
"(",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"x",
",",
"tuple",
")",
"else",
"(",
"x",
",",
")",
")",
")",
"def",
"_expect_kind",
"(",
"kinds",
")",
":",
"\"\"\"\n Factory for kind-checking functions that work the @preprocess\n decorator.\n \"\"\"",
"def",
"error_message",
"(",
"func",
",",
"argname",
",",
"value",
")",
":",
"# If the bad value has a dtype, but it's wrong, show the dtype",
"# kind. Otherwise just show the value.",
"try",
":",
"value_to_show",
"=",
"value",
".",
"dtype",
".",
"kind",
"except",
"AttributeError",
":",
"value_to_show",
"=",
"value",
"return",
"(",
"\"{funcname}() expected a numpy object of kind {kinds} \"",
"\"for argument {argname!r}, but got {value!r} instead.\"",
")",
".",
"format",
"(",
"funcname",
"=",
"_qualified_name",
"(",
"func",
")",
",",
"kinds",
"=",
"' or '",
".",
"join",
"(",
"map",
"(",
"repr",
",",
"kinds",
")",
")",
",",
"argname",
"=",
"argname",
",",
"value",
"=",
"value_to_show",
",",
")",
"def",
"_actual_preprocessor",
"(",
"func",
",",
"argname",
",",
"argvalue",
")",
":",
"if",
"getattrs",
"(",
"argvalue",
",",
"(",
"'dtype'",
",",
"'kind'",
")",
",",
"object",
"(",
")",
")",
"not",
"in",
"kinds",
":",
"raise",
"TypeError",
"(",
"error_message",
"(",
"func",
",",
"argname",
",",
"argvalue",
")",
")",
"return",
"argvalue",
"return",
"_actual_preprocessor",
"return",
"preprocess",
"(",
"*",
"*",
"valmap",
"(",
"_expect_kind",
",",
"named",
")",
")"
] | Preprocessing decorator that verifies inputs have expected dtype kinds.
Examples
--------
>>> from numpy import int64, int32, float32
>>> @expect_kinds(x='i')
... def foo(x):
... return x
...
>>> foo(int64(2))
2
>>> foo(int32(2))
2
>>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
but got 'f' instead. | [
"Preprocessing",
"decorator",
"that",
"verifies",
"inputs",
"have",
"expected",
"dtype",
"kinds",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L295-L355 |
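Likewise, expect_kinds accepts a tuple of numpy kind codes per argument; an illustrative sketch (assumes zipline is installed):

import numpy as np
from zipline.utils.input_validation import expect_kinds

@expect_kinds(x=('i', 'f'))
def foo(x):
    return x

foo(np.int32(2))      # integer kind 'i' passes
foo(np.float64(2.5))  # float kind 'f' passes
try:
    foo(np.complex128(1j))  # kind 'c' is rejected
except TypeError as e:
    print(e)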
25,993 | quantopian/zipline | zipline/utils/input_validation.py | expect_types | def expect_types(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Examples
--------
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value of type int for argument 'x',
but got float instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name, type_=type_,
)
)
def _expect_type(type_):
# Slightly different messages for type and tuple of types.
_template = (
"%(funcname)s() expected a value of type {type_or_types} "
"for argument '%(argname)s', but got %(actual)s instead."
)
if isinstance(type_, tuple):
template = _template.format(
type_or_types=' or '.join(map(_qualified_name, type_))
)
else:
template = _template.format(type_or_types=_qualified_name(type_))
return make_check(
exc_type=TypeError,
template=template,
pred=lambda v: not isinstance(v, type_),
actual=compose(_qualified_name, type),
funcname=__funcname,
)
return preprocess(**valmap(_expect_type, named)) | python | def expect_types(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Examples
--------
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value of type int for argument 'x',
but got float instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name, type_=type_,
)
)
def _expect_type(type_):
# Slightly different messages for type and tuple of types.
_template = (
"%(funcname)s() expected a value of type {type_or_types} "
"for argument '%(argname)s', but got %(actual)s instead."
)
if isinstance(type_, tuple):
template = _template.format(
type_or_types=' or '.join(map(_qualified_name, type_))
)
else:
template = _template.format(type_or_types=_qualified_name(type_))
return make_check(
exc_type=TypeError,
template=template,
pred=lambda v: not isinstance(v, type_),
actual=compose(_qualified_name, type),
funcname=__funcname,
)
return preprocess(**valmap(_expect_type, named)) | [
"def",
"expect_types",
"(",
"__funcname",
"=",
"_qualified_name",
",",
"*",
"*",
"named",
")",
":",
"for",
"name",
",",
"type_",
"in",
"iteritems",
"(",
"named",
")",
":",
"if",
"not",
"isinstance",
"(",
"type_",
",",
"(",
"type",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"expect_types() expected a type or tuple of types for \"",
"\"argument '{name}', but got {type_} instead.\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"type_",
"=",
"type_",
",",
")",
")",
"def",
"_expect_type",
"(",
"type_",
")",
":",
"# Slightly different messages for type and tuple of types.",
"_template",
"=",
"(",
"\"%(funcname)s() expected a value of type {type_or_types} \"",
"\"for argument '%(argname)s', but got %(actual)s instead.\"",
")",
"if",
"isinstance",
"(",
"type_",
",",
"tuple",
")",
":",
"template",
"=",
"_template",
".",
"format",
"(",
"type_or_types",
"=",
"' or '",
".",
"join",
"(",
"map",
"(",
"_qualified_name",
",",
"type_",
")",
")",
")",
"else",
":",
"template",
"=",
"_template",
".",
"format",
"(",
"type_or_types",
"=",
"_qualified_name",
"(",
"type_",
")",
")",
"return",
"make_check",
"(",
"exc_type",
"=",
"TypeError",
",",
"template",
"=",
"template",
",",
"pred",
"=",
"lambda",
"v",
":",
"not",
"isinstance",
"(",
"v",
",",
"type_",
")",
",",
"actual",
"=",
"compose",
"(",
"_qualified_name",
",",
"type",
")",
",",
"funcname",
"=",
"__funcname",
",",
")",
"return",
"preprocess",
"(",
"*",
"*",
"valmap",
"(",
"_expect_type",
",",
"named",
")",
")"
] | Preprocessing decorator that verifies inputs have expected types.
Examples
--------
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value of type int for argument 'x',
but got float instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name. | [
"Preprocessing",
"decorator",
"that",
"verifies",
"inputs",
"have",
"expected",
"types",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L358-L413 |
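A sketch of the __funcname override described in this record's Notes, so errors name the class rather than __init__ (Portfolio is a hypothetical class; assumes zipline is installed):

from zipline.utils.input_validation import expect_types

class Portfolio(object):
    @expect_types(__funcname='Portfolio', cash=float)
    def __init__(self, cash):
        self.cash = cash

try:
    Portfolio(cash='100')
except TypeError as e:
    # Portfolio() expected a value of type float for argument 'cash',
    # but got str instead.
    print(e)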
25,994 | quantopian/zipline | zipline/utils/input_validation.py | make_check | def make_check(exc_type, template, pred, actual, funcname):
"""
Factory for making preprocessing functions that check a predicate on the
input value.
Parameters
----------
exc_type : Exception
The exception type to raise if the predicate fails.
template : str
A template string to use to create error messages.
Should have %-style named template parameters for 'funcname',
'argname', and 'actual'.
pred : function[object -> bool]
A function to call on the argument being preprocessed. If the
predicate returns `True`, we raise an instance of `exc_type`.
actual : function[object -> object]
A function to call on bad values to produce the value to display in the
error message.
funcname : str or callable
Name to use in error messages, or function to call on decorated
functions to produce a name. Passing an explicit name is useful when
creating checks for __init__ or __new__ methods when you want the error
to refer to the class name instead of the method name.
"""
if isinstance(funcname, str):
def get_funcname(_):
return funcname
else:
get_funcname = funcname
def _check(func, argname, argvalue):
if pred(argvalue):
raise exc_type(
template % {
'funcname': get_funcname(func),
'argname': argname,
'actual': actual(argvalue),
},
)
return argvalue
return _check | python | def make_check(exc_type, template, pred, actual, funcname):
"""
Factory for making preprocessing functions that check a predicate on the
input value.
Parameters
----------
exc_type : Exception
The exception type to raise if the predicate fails.
template : str
A template string to use to create error messages.
Should have %-style named template parameters for 'funcname',
'argname', and 'actual'.
pred : function[object -> bool]
A function to call on the argument being preprocessed. If the
predicate returns `True`, we raise an instance of `exc_type`.
actual : function[object -> object]
A function to call on bad values to produce the value to display in the
error message.
funcname : str or callable
Name to use in error messages, or function to call on decorated
functions to produce a name. Passing an explicit name is useful when
creating checks for __init__ or __new__ methods when you want the error
to refer to the class name instead of the method name.
"""
if isinstance(funcname, str):
def get_funcname(_):
return funcname
else:
get_funcname = funcname
def _check(func, argname, argvalue):
if pred(argvalue):
raise exc_type(
template % {
'funcname': get_funcname(func),
'argname': argname,
'actual': actual(argvalue),
},
)
return argvalue
return _check | [
"def",
"make_check",
"(",
"exc_type",
",",
"template",
",",
"pred",
",",
"actual",
",",
"funcname",
")",
":",
"if",
"isinstance",
"(",
"funcname",
",",
"str",
")",
":",
"def",
"get_funcname",
"(",
"_",
")",
":",
"return",
"funcname",
"else",
":",
"get_funcname",
"=",
"funcname",
"def",
"_check",
"(",
"func",
",",
"argname",
",",
"argvalue",
")",
":",
"if",
"pred",
"(",
"argvalue",
")",
":",
"raise",
"exc_type",
"(",
"template",
"%",
"{",
"'funcname'",
":",
"get_funcname",
"(",
"func",
")",
",",
"'argname'",
":",
"argname",
",",
"'actual'",
":",
"actual",
"(",
"argvalue",
")",
",",
"}",
",",
")",
"return",
"argvalue",
"return",
"_check"
] | Factory for making preprocessing functions that check a predicate on the
input value.
Parameters
----------
exc_type : Exception
The exception type to raise if the predicate fails.
template : str
A template string to use to create error messages.
Should have %-style named template parameters for 'funcname',
'argname', and 'actual'.
pred : function[object -> bool]
A function to call on the argument being preprocessed. If the
predicate returns `True`, we raise an instance of `exc_type`.
actual : function[object -> object]
A function to call on bad values to produce the value to display in the
error message.
funcname : str or callable
Name to use in error messages, or function to call on decorated
functions to produce a name. Passing an explicit name is useful when
creating checks for __init__ or __new__ methods when you want the error
to refer to the class name instead of the method name. | [
"Factory",
"for",
"making",
"preprocessing",
"functions",
"that",
"check",
"a",
"predicate",
"on",
"the",
"input",
"value",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L416-L457 |
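make_check has no example in its docstring; here is a sketch building a custom validator with it and wiring it in via preprocess (expect_positive and sqrt_like are hypothetical names; assumes zipline is installed):

from zipline.utils.input_validation import make_check
from zipline.utils.preprocess import preprocess

expect_positive = make_check(
    exc_type=ValueError,
    template="%(funcname)s() expected a positive value for "
             "argument '%(argname)s', but got %(actual)s instead.",
    pred=lambda v: v <= 0,            # fail when the value is not positive
    actual=repr,                      # display the offending value verbatim
    funcname=lambda func: func.__name__,
)

@preprocess(x=expect_positive)
def sqrt_like(x):
    return x ** 0.5

sqrt_like(4.0)  # 2.0
try:
    sqrt_like(-1.0)
except ValueError as e:
    print(e)  # sqrt_like() expected a positive value for argument 'x', but got -1.0 instead.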
25,995 | quantopian/zipline | zipline/utils/input_validation.py | expect_element | def expect_element(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs are elements of some
expected collection.
Examples
--------
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
but got 'c' instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol.
"""
def _expect_element(collection):
if isinstance(collection, (set, frozenset)):
# Special case the error message for set and frozen set to make it
# less verbose.
collection_for_error_message = tuple(sorted(collection))
else:
collection_for_error_message = collection
template = (
"%(funcname)s() expected a value in {collection} "
"for argument '%(argname)s', but got %(actual)s instead."
).format(collection=collection_for_error_message)
return make_check(
ValueError,
template,
complement(op.contains(collection)),
repr,
funcname=__funcname,
)
return preprocess(**valmap(_expect_element, named)) | python | def expect_element(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs are elements of some
expected collection.
Examples
--------
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
but got 'c' instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol.
"""
def _expect_element(collection):
if isinstance(collection, (set, frozenset)):
# Special case the error message for set and frozen set to make it
# less verbose.
collection_for_error_message = tuple(sorted(collection))
else:
collection_for_error_message = collection
template = (
"%(funcname)s() expected a value in {collection} "
"for argument '%(argname)s', but got %(actual)s instead."
).format(collection=collection_for_error_message)
return make_check(
ValueError,
template,
complement(op.contains(collection)),
repr,
funcname=__funcname,
)
return preprocess(**valmap(_expect_element, named)) | [
"def",
"expect_element",
"(",
"__funcname",
"=",
"_qualified_name",
",",
"*",
"*",
"named",
")",
":",
"def",
"_expect_element",
"(",
"collection",
")",
":",
"if",
"isinstance",
"(",
"collection",
",",
"(",
"set",
",",
"frozenset",
")",
")",
":",
"# Special case the error message for set and frozen set to make it",
"# less verbose.",
"collection_for_error_message",
"=",
"tuple",
"(",
"sorted",
"(",
"collection",
")",
")",
"else",
":",
"collection_for_error_message",
"=",
"collection",
"template",
"=",
"(",
"\"%(funcname)s() expected a value in {collection} \"",
"\"for argument '%(argname)s', but got %(actual)s instead.\"",
")",
".",
"format",
"(",
"collection",
"=",
"collection_for_error_message",
")",
"return",
"make_check",
"(",
"ValueError",
",",
"template",
",",
"complement",
"(",
"op",
".",
"contains",
"(",
"collection",
")",
")",
",",
"repr",
",",
"funcname",
"=",
"__funcname",
",",
")",
"return",
"preprocess",
"(",
"*",
"*",
"valmap",
"(",
"_expect_element",
",",
"named",
")",
")"
] | Preprocessing decorator that verifies inputs are elements of some
expected collection.
Examples
--------
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
but got 'c' instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol. | [
"Preprocessing",
"decorator",
"that",
"verifies",
"inputs",
"are",
"elements",
"of",
"some",
"expected",
"collection",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L484-L535 |
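A sketch of the set special case noted in this record's code, where the error message renders the sorted elements (illustrative; assumes zipline is installed):

from zipline.utils.input_validation import expect_element

@expect_element(side={'buy', 'sell'})
def order(side):
    return side

order('buy')  # 'buy'
try:
    order('hold')
except ValueError as e:
    print(e)  # ...expected a value in ('buy', 'sell') for argument 'side'...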
25,996 | quantopian/zipline | zipline/utils/input_validation.py | expect_bounded | def expect_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value > upper
predicate_descr = "less than or equal to " + str(upper)
elif upper is None:
def should_fail(value):
return value < lower
predicate_descr = "greater than or equal to " + str(lower)
else:
def should_fail(value):
return not (lower <= value <= upper)
predicate_descr = "inclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named) | python | def expect_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value > upper
predicate_descr = "less than or equal to " + str(upper)
elif upper is None:
def should_fail(value):
return value < lower
predicate_descr = "greater than or equal to " + str(lower)
else:
def should_fail(value):
return not (lower <= value <= upper)
predicate_descr = "inclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named) | [
"def",
"expect_bounded",
"(",
"__funcname",
"=",
"_qualified_name",
",",
"*",
"*",
"named",
")",
":",
"def",
"_make_bounded_check",
"(",
"bounds",
")",
":",
"(",
"lower",
",",
"upper",
")",
"=",
"bounds",
"if",
"lower",
"is",
"None",
":",
"def",
"should_fail",
"(",
"value",
")",
":",
"return",
"value",
">",
"upper",
"predicate_descr",
"=",
"\"less than or equal to \"",
"+",
"str",
"(",
"upper",
")",
"elif",
"upper",
"is",
"None",
":",
"def",
"should_fail",
"(",
"value",
")",
":",
"return",
"value",
"<",
"lower",
"predicate_descr",
"=",
"\"greater than or equal to \"",
"+",
"str",
"(",
"lower",
")",
"else",
":",
"def",
"should_fail",
"(",
"value",
")",
":",
"return",
"not",
"(",
"lower",
"<=",
"value",
"<=",
"upper",
")",
"predicate_descr",
"=",
"\"inclusively between %s and %s\"",
"%",
"bounds",
"template",
"=",
"(",
"\"%(funcname)s() expected a value {predicate}\"",
"\" for argument '%(argname)s', but got %(actual)s instead.\"",
")",
".",
"format",
"(",
"predicate",
"=",
"predicate_descr",
")",
"return",
"make_check",
"(",
"exc_type",
"=",
"ValueError",
",",
"template",
"=",
"template",
",",
"pred",
"=",
"should_fail",
",",
"actual",
"=",
"repr",
",",
"funcname",
"=",
"__funcname",
",",
")",
"return",
"_expect_bounded",
"(",
"_make_bounded_check",
",",
"__funcname",
"=",
"__funcname",
",",
"*",
"*",
"named",
")"
] | Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead. | [
"Preprocessing",
"decorator",
"verifying",
"that",
"inputs",
"fall",
"INCLUSIVELY",
"between",
"bounds",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L538-L614 |
25,997 | quantopian/zipline | zipline/utils/input_validation.py | expect_dimensions | def expect_dimensions(__funcname=_qualified_name, **dimensions):
"""
Preprocessing decorator that verifies inputs are numpy arrays with a
specific dimensionality.
Examples
--------
>>> from numpy import array
>>> @expect_dimensions(x=1, y=2)
... def foo(x, y):
... return x[0] + y[0, 0]
...
>>> foo(array([1, 1]), array([[1, 1], [2, 2]]))
2
>>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a 2-D array for argument 'y',
but got a 1-D array instead.
"""
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
def _expect_dimension(expected_ndim):
def _check(func, argname, argvalue):
actual_ndim = argvalue.ndim
if actual_ndim != expected_ndim:
if actual_ndim == 0:
actual_repr = 'scalar'
else:
actual_repr = "%d-D array" % actual_ndim
raise ValueError(
"{func}() expected a {expected:d}-D array"
" for argument {argname!r}, but got a {actual}"
" instead.".format(
func=get_funcname(func),
expected=expected_ndim,
argname=argname,
actual=actual_repr,
)
)
return argvalue
return _check
return preprocess(**valmap(_expect_dimension, dimensions)) | python | def expect_dimensions(__funcname=_qualified_name, **dimensions):
"""
Preprocessing decorator that verifies inputs are numpy arrays with a
specific dimensionality.
Examples
--------
>>> from numpy import array
>>> @expect_dimensions(x=1, y=2)
... def foo(x, y):
... return x[0] + y[0, 0]
...
>>> foo(array([1, 1]), array([[1, 1], [2, 2]]))
2
>>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a 2-D array for argument 'y',
but got a 1-D array instead.
"""
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
def _expect_dimension(expected_ndim):
def _check(func, argname, argvalue):
actual_ndim = argvalue.ndim
if actual_ndim != expected_ndim:
if actual_ndim == 0:
actual_repr = 'scalar'
else:
actual_repr = "%d-D array" % actual_ndim
raise ValueError(
"{func}() expected a {expected:d}-D array"
" for argument {argname!r}, but got a {actual}"
" instead.".format(
func=get_funcname(func),
expected=expected_ndim,
argname=argname,
actual=actual_repr,
)
)
return argvalue
return _check
return preprocess(**valmap(_expect_dimension, dimensions)) | [
"def",
"expect_dimensions",
"(",
"__funcname",
"=",
"_qualified_name",
",",
"*",
"*",
"dimensions",
")",
":",
"if",
"isinstance",
"(",
"__funcname",
",",
"str",
")",
":",
"def",
"get_funcname",
"(",
"_",
")",
":",
"return",
"__funcname",
"else",
":",
"get_funcname",
"=",
"__funcname",
"def",
"_expect_dimension",
"(",
"expected_ndim",
")",
":",
"def",
"_check",
"(",
"func",
",",
"argname",
",",
"argvalue",
")",
":",
"actual_ndim",
"=",
"argvalue",
".",
"ndim",
"if",
"actual_ndim",
"!=",
"expected_ndim",
":",
"if",
"actual_ndim",
"==",
"0",
":",
"actual_repr",
"=",
"'scalar'",
"else",
":",
"actual_repr",
"=",
"\"%d-D array\"",
"%",
"actual_ndim",
"raise",
"ValueError",
"(",
"\"{func}() expected a {expected:d}-D array\"",
"\" for argument {argname!r}, but got a {actual}\"",
"\" instead.\"",
".",
"format",
"(",
"func",
"=",
"get_funcname",
"(",
"func",
")",
",",
"expected",
"=",
"expected_ndim",
",",
"argname",
"=",
"argname",
",",
"actual",
"=",
"actual_repr",
",",
")",
")",
"return",
"argvalue",
"return",
"_check",
"return",
"preprocess",
"(",
"*",
"*",
"valmap",
"(",
"_expect_dimension",
",",
"dimensions",
")",
")"
] | Preprocessing decorator that verifies inputs are numpy arrays with a
specific dimensionality.
Examples
--------
>>> from numpy import array
>>> @expect_dimensions(x=1, y=2)
... def foo(x, y):
... return x[0] + y[0, 0]
...
>>> foo(array([1, 1]), array([[1, 1], [2, 2]]))
2
>>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a 2-D array for argument 'y',
but got a 1-D array instead. | [
"Preprocessing",
"decorator",
"that",
"verifies",
"inputs",
"are",
"numpy",
"arrays",
"with",
"a",
"specific",
"dimensionality",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L717-L764 |
25,998 | quantopian/zipline | zipline/utils/input_validation.py | coerce | def coerce(from_, to, **to_kwargs):
"""
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
from_ : type or tuple of types
Input types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
"""
def preprocessor(func, argname, arg):
if isinstance(arg, from_):
return to(arg, **to_kwargs)
return arg
return preprocessor | python | def coerce(from_, to, **to_kwargs):
"""
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
from_ : type or tuple of types
Input types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
"""
def preprocessor(func, argname, arg):
if isinstance(arg, from_):
return to(arg, **to_kwargs)
return arg
return preprocessor | [
"def",
"coerce",
"(",
"from_",
",",
"to",
",",
"*",
"*",
"to_kwargs",
")",
":",
"def",
"preprocessor",
"(",
"func",
",",
"argname",
",",
"arg",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"from_",
")",
":",
"return",
"to",
"(",
"arg",
",",
"*",
"*",
"to_kwargs",
")",
"return",
"arg",
"return",
"preprocessor"
] | A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
from_ : type or tuple of types
Input types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110' | [
"A",
"preprocessing",
"decorator",
"that",
"coerces",
"inputs",
"of",
"a",
"given",
"type",
"by",
"passing",
"them",
"to",
"a",
"callable",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L767-L801 |
25,999 | quantopian/zipline | zipline/utils/input_validation.py | coerce_types | def coerce_types(**kwargs):
"""
Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3')
"""
def _coerce(types):
return coerce(*types)
return preprocess(**valmap(_coerce, kwargs)) | python | def coerce_types(**kwargs):
"""
Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3')
"""
def _coerce(types):
return coerce(*types)
return preprocess(**valmap(_coerce, kwargs)) | [
"def",
"coerce_types",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"_coerce",
"(",
"types",
")",
":",
"return",
"coerce",
"(",
"*",
"types",
")",
"return",
"preprocess",
"(",
"*",
"*",
"valmap",
"(",
"_coerce",
",",
"kwargs",
")",
")"
] | Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3') | [
"Preprocessing",
"decorator",
"that",
"applies",
"type",
"coercions",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L804-L826 |