Fields per record: id | repo | path | func_name | language | code | sha | url

id: 26,000 | repo: quantopian/zipline | path: zipline/utils/input_validation.py | func_name: validate_keys | language: python

```python
def validate_keys(dict_, expected, funcname):
"""Validate that a dictionary has an expected set of keys.
"""
expected = set(expected)
received = set(dict_)
missing = expected - received
if missing:
raise ValueError(
"Missing keys in {}:\n"
"Expected Keys: {}\n"
"Received Keys: {}".format(
funcname,
sorted(expected),
sorted(received),
)
)
unexpected = received - expected
if unexpected:
raise ValueError(
"Unexpected keys in {}:\n"
"Expected Keys: {}\n"
"Received Keys: {}".format(
funcname,
sorted(expected),
sorted(received),
)
        )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L847-L875
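
A minimal usage sketch (assuming `validate_keys` above is in scope; the dict and function name are made up):

```python
params = {'start': 0, 'end': 10}

validate_keys(params, {'start', 'end'}, 'run_backtest')  # passes silently

try:
    validate_keys(params, {'start', 'end', 'step'}, 'run_backtest')
except ValueError as err:
    print(err)  # reports 'step' as missing from run_backtest
```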

id: 26,001 | repo: quantopian/zipline | path: zipline/utils/enum.py | func_name: enum | language: python

```python
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
    return _enum(*rangeob)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/enum.py#L48-L114

id: 26,002 | repo: quantopian/zipline | path: zipline/utils/data.py | func_name: RollingPanel.extend_back | language: python

```python
def extend_back(self, missing_dts):
"""
    Grow the buffer backwards to hold `missing_dts` additional dates,
    prepending NaN-filled slots that are then filled with those dates.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
    self.date_buf[where] = missing_dts
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L107-L148
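
A rough sketch of the prepend-and-backfill idea on a bare NumPy buffer (illustrative only; `np.resize` repeats values rather than zero-padding, which is irrelevant here because the rolled-in slots are overwritten):

```python
import numpy as np

buf = np.arange(6, dtype=float)   # stand-in for the date buffer
delta = 2                         # number of missing dates to prepend

buf = np.roll(np.resize(buf, buf.size + delta), delta)
buf[:delta] = np.nan              # slots for the missing dates
print(buf)                        # [nan nan  0.  1.  2.  3.  4.  5.]
```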

id: 26,003 | repo: quantopian/zipline | path: zipline/utils/data.py | func_name: RollingPanel.set_current | language: python

```python
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
    self.buffer.values[:, where, :] = panel.values
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L216-L223

id: 26,004 | repo: quantopian/zipline | path: zipline/utils/data.py | func_name: RollingPanel._roll_data | language: python

```python
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
    self._pos = self._window
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L229-L238
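
The same copy-to-front move in isolation (a sketch, not zipline code):

```python
import numpy as np

buf = np.arange(10.0)
window = 3

buf[:window] = buf[-window:]  # move the live window to the front in place
pos = window                  # next write position follows the window
print(buf[:window])           # [7. 8. 9.]
```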

id: 26,005 | repo: quantopian/zipline | path: zipline/finance/order.py | func_name: Order.check_triggers | language: python

```python
def check_triggers(self, price, dt):
"""
Update internal state based on price triggers and the
trade event's price.
"""
stop_reached, limit_reached, sl_stop_reached = \
self.check_order_triggers(price)
if (stop_reached, limit_reached) \
!= (self.stop_reached, self.limit_reached):
self.dt = dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
        self.stop = None
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/order.py#L108-L122

id: 26,006 | repo: quantopian/zipline | path: zipline/finance/order.py | func_name: Order.triggered | language: python

```python
def triggered(self):
"""
For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached.
"""
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
    return True
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/order.py#L230-L242
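
The trigger predicate extracted into a standalone function (a sketch mirroring the logic above, not the zipline class):

```python
def is_triggered(stop, limit, stop_reached, limit_reached):
    # Market orders have neither threshold and are always triggered.
    if stop is not None and not stop_reached:
        return False
    if limit is not None and not limit_reached:
        return False
    return True

print(is_triggered(None, None, False, False))  # True: market order
print(is_triggered(10.0, None, False, False))  # False: stop not yet reached
print(is_triggered(10.0, None, True, False))   # True: stop reached
```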

id: 26,007 | repo: quantopian/zipline | path: zipline/gens/utils.py | func_name: hash_args | language: python

```python
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
    return hasher.hexdigest()
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/utils.py#L27-L36
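
Usage sketch (assuming `hash_args` above is importable; because kwargs are joined in dict-iteration order, equal call signatures hash identically within a process):

```python
digest = hash_args(1, 'AAPL', freq='daily')
print(digest)  # 32-character md5 hex string

# Identical representable arguments always produce the same digest.
assert hash_args(1, 'AAPL', freq='daily') == hash_args(1, 'AAPL', freq='daily')
```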

id: 26,008 | repo: quantopian/zipline | path: zipline/gens/utils.py | func_name: assert_datasource_protocol | language: python

```python
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
        assert event.dt.tzinfo == pytz.utc
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/utils.py#L39-L47

id: 26,009 | repo: quantopian/zipline | path: zipline/gens/utils.py | func_name: assert_trade_protocol | language: python

```python
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
    assert isinstance(event.dt, datetime)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/utils.py#L50-L57
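
A hypothetical event that should pass both protocol checks (the namedtuple is a stand-in for zipline's richer event object, and `DATASOURCE_TYPE` is assumed imported from `zipline.protocol`):

```python
from collections import namedtuple
from datetime import datetime

import pytz

FakeEvent = namedtuple('FakeEvent', 'type dt price volume')

evt = FakeEvent(type=DATASOURCE_TYPE.TRADE,
                dt=datetime.now(pytz.utc),
                price=10.5,
                volume=100)
assert_trade_protocol(evt)  # passes: tz-aware dt, real price, integral volume
```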

id: 26,010 | repo: quantopian/zipline | path: zipline/gens/composites.py | func_name: date_sorted_sources | language: python

```python
def date_sorted_sources(*sources):
"""
Takes an iterable of sources, generating namestrings and
piping their output into date_sort.
"""
sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources))
# Strip out key decoration
for _, message in sorted_stream:
        yield message
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/composites.py#L24-L33
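
A usage sketch merging two pre-sorted streams (assuming `_decorate_source` keys each message by `(dt, source_id)`, which is what the heap merge relies on):

```python
from collections import namedtuple
from datetime import datetime

# Stand-in events; both attributes used by the sort key are provided.
Event = namedtuple('Event', 'dt source_id payload')

a = iter([Event(datetime(2020, 1, 1), 'a', 'a1'),
          Event(datetime(2020, 1, 3), 'a', 'a2')])
b = iter([Event(datetime(2020, 1, 2), 'b', 'b1')])

for evt in date_sorted_sources(a, b):
    print(evt.dt.date(), evt.payload)
# 2020-01-01 a1
# 2020-01-02 b1
# 2020-01-03 a2
```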

id: 26,011 | repo: quantopian/zipline | path: zipline/utils/factory.py | func_name: create_daily_trade_source | language: python

```python
def create_daily_trade_source(sids,
sim_params,
asset_finder,
trading_calendar):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.start_session, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
asset_finder,
trading_calendar=trading_calendar,
    )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/factory.py#L115-L131

id: 26,012 | repo: quantopian/zipline | path: zipline/data/bundles/quandl.py | func_name: load_data_table | language: python

```python
def load_data_table(file,
index_col,
show_progress=False):
""" Load data table from zip file provided by Quandl.
"""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
wiki_prices = file_names.pop()
with zip_file.open(wiki_prices) as table_file:
if show_progress:
log.info('Parsing raw data.')
data_table = pd.read_csv(
table_file,
parse_dates=['date'],
index_col=index_col,
usecols=[
'ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'ex-dividend',
'split_ratio',
],
)
data_table.rename(
columns={
'ticker': 'symbol',
'ex-dividend': 'ex_dividend',
},
inplace=True,
copy=False,
)
    return data_table
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L38-L75

id: 26,013 | repo: quantopian/zipline | path: zipline/data/bundles/quandl.py | func_name: fetch_data_table | language: python

```python
def fetch_data_table(api_key,
show_progress,
retries):
""" Fetch WIKI Prices data table from Quandl
"""
for _ in range(retries):
try:
if show_progress:
log.info('Downloading WIKI metadata.')
metadata = pd.read_csv(
format_metadata_url(api_key)
)
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, 'file.link']
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading WIKI Prices table from Quandl"
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
        )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L78-L114
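
The `for`/`else` retry pattern used above, in isolation: the `else` clause runs only when the loop finishes without a `break` (or, as above, without a `return`), i.e. when every attempt failed:

```python
def flaky():
    raise IOError('network down')

for attempt in range(3):
    try:
        flaky()
        break                       # success: skip the else clause
    except Exception:
        print('attempt %d failed' % attempt)
else:
    print('all retries exhausted')  # reached only if no attempt broke out
```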

id: 26,014 | repo: quantopian/zipline | path: zipline/data/bundles/quandl.py | func_name: quandl_bundle | language: python

```python
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
"""
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
"""
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5)
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[[
'sid',
'date',
'split_ratio',
]].loc[raw_data.split_ratio != 1],
show_progress=show_progress
),
dividends=parse_dividends(
raw_data[[
'sid',
'date',
'ex_dividend',
]].loc[raw_data.ex_dividend != 0],
show_progress=show_progress
)
    )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L183-L250
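
How the bundle is typically driven (a sketch; `ingest` is assumed to be the bundle entry point exposed by `zipline.data.bundles`, and the key value is a placeholder):

```python
import os

os.environ['QUANDL_API_KEY'] = 'your-api-key-here'  # placeholder, not a real key

# Roughly equivalent to `zipline ingest -b quandl` on the command line.
from zipline.data.bundles import ingest
ingest('quandl')
```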

id: 26,015 | repo: quantopian/zipline | path: zipline/data/bundles/quandl.py | func_name: download_with_progress | language: python

```python
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
    return data
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L253-L283
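
Usage sketch (the URL is a placeholder; `label` is forwarded to `click.progressbar`):

```python
data = download_with_progress(
    'https://example.com/archive.zip',  # placeholder URL
    chunk_size=1024 * 1024,             # one megabyte per read
    label='Downloading archive',
)
print('%d bytes downloaded' % len(data.getvalue()))
```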

id: 26,016 | repo: quantopian/zipline | path: zipline/data/bundles/quandl.py | func_name: download_without_progress | language: python

```python
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
    return BytesIO(resp.content)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L286-L302

id: 26,017 | repo: quantopian/zipline | path: zipline/data/resample.py | func_name: minute_frame_to_session_frame | language: python

```python
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : trading_calendars.trading_calendar.TradingCalendar
A TradingCalendar on which session labels to resample from minute
to session.
Return
------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
labels = calendar.minute_index_to_session_labels(minute_frame.index)
    return minute_frame.groupby(labels).agg(how)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L42-L66
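
A calendar-free pandas sketch of the same aggregation (assuming `_MINUTE_TO_SESSION_OHCLV_HOW` maps the columns to first/max/min/last/sum, the conventional OHLCV downsampling):

```python
import pandas as pd

idx = pd.date_range('2020-01-02 09:31', periods=4, freq='T')
minutes = pd.DataFrame({
    'open':   [10, 11, 12, 13],
    'high':   [10, 12, 12, 14],
    'low':    [9, 11, 11, 13],
    'close':  [10, 12, 11, 14],
    'volume': [100, 200, 100, 300],
}, index=idx)

how = {'open': 'first', 'high': 'max', 'low': 'min',
       'close': 'last', 'volume': 'sum'}
print(minutes.groupby(idx.date).agg(how))  # one OHLCV row per session
```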

id: 26,018 | repo: quantopian/zipline | path: zipline/data/resample.py | func_name: minute_to_session | language: python

```python
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
The first value should align with the market open of the first session,
containing values for all minutes for all sessions. With the last value
being the market close of the last session.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
    return out
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L69-L100

id: 26,019 | repo: quantopian/zipline | path: zipline/data/resample.py | func_name: DailyHistoryAggregator.opens | language: python

```python
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
for the day, if there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
    return np.array(opens)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L167-L237
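
The per-asset memoization pattern above, reduced to its core (a simplification of the real method, which also distinguishes same-minute hits from stale entries that need a partial re-read):

```python
cache = {}  # asset -> (last_visited_dt, value)

def cached_value(asset, dt, compute):
    """Reuse the cached value when the clock hasn't advanced past dt."""
    entry = cache.get(asset)
    if entry is not None and entry[0] == dt:
        return entry[1]            # already computed for this minute
    val = compute(asset, dt)       # otherwise recompute and remember
    cache[asset] = (dt, val)
    return val
```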

id: 26,020 | repo: quantopian/zipline | path: zipline/data/resample.py | func_name: DailyHistoryAggregator.highs | language: python

```python
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs) | python | def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt`, the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs) | [
"def",
"highs",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"market_open",
",",
"prev_dt",
",",
"dt_value",
",",
"entries",
"=",
"self",
".",
"_prelude",
"(",
"dt",
",",
"'high'",
")",
"highs",
"=",
"[",
"]",
"session_label",
"=",
"self",
".",
"_trading_calendar",
".",
"minute_to_session_label",
"(",
"dt",
")",
"for",
"asset",
"in",
"assets",
":",
"if",
"not",
"asset",
".",
"is_alive_for_session",
"(",
"session_label",
")",
":",
"highs",
".",
"append",
"(",
"np",
".",
"NaN",
")",
"continue",
"if",
"prev_dt",
"is",
"None",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'high'",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"highs",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"try",
":",
"last_visited_dt",
",",
"last_max",
"=",
"entries",
"[",
"asset",
"]",
"if",
"last_visited_dt",
"==",
"dt_value",
":",
"highs",
".",
"append",
"(",
"last_max",
")",
"continue",
"elif",
"last_visited_dt",
"==",
"prev_dt",
":",
"curr_val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'high'",
")",
"if",
"pd",
".",
"isnull",
"(",
"curr_val",
")",
":",
"val",
"=",
"last_max",
"elif",
"pd",
".",
"isnull",
"(",
"last_max",
")",
":",
"val",
"=",
"curr_val",
"else",
":",
"val",
"=",
"max",
"(",
"last_max",
",",
"curr_val",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"highs",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"after_last",
"=",
"pd",
".",
"Timestamp",
"(",
"last_visited_dt",
"+",
"self",
".",
"_one_min",
",",
"tz",
"=",
"'UTC'",
")",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'high'",
"]",
",",
"after_last",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
".",
"T",
"val",
"=",
"np",
".",
"nanmax",
"(",
"np",
".",
"append",
"(",
"window",
",",
"last_max",
")",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"highs",
".",
"append",
"(",
"val",
")",
"continue",
"except",
"KeyError",
":",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'high'",
"]",
",",
"market_open",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
".",
"T",
"val",
"=",
"np",
".",
"nanmax",
"(",
"window",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"highs",
".",
"append",
"(",
"val",
")",
"continue",
"return",
"np",
".",
"array",
"(",
"highs",
")"
] | The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt`, the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter. | [
"The",
"high",
"field",
"s",
"aggregation",
"returns",
"the",
"largest",
"high",
"seen",
"between",
"the",
"market",
"open",
"and",
"the",
"current",
"dt",
".",
"If",
"there",
"has",
"been",
"no",
"data",
"on",
"or",
"before",
"the",
"dt",
"the",
"high",
"is",
"nan",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L239-L306 |
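A sketch of the nan-aware running-max update used in the consecutive-minute branch above, with invented values:

import numpy as np
import pandas as pd

def update_running_high(last_max, curr_val):
    # A nan on either side falls back to the other value instead of
    # poisoning the max, matching the three-way branch in `highs`.
    if pd.isnull(curr_val):
        return last_max
    if pd.isnull(last_max):
        return curr_val
    return max(last_max, curr_val)

print(update_running_high(11.2, np.nan))  # 11.2
print(update_running_high(np.nan, 11.5))  # 11.5
print(update_running_high(11.2, 11.5))    # 11.5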
26,021 | quantopian/zipline | zipline/data/resample.py | DailyHistoryAggregator.lows | def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt`, the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows) | python | def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt`, the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows) | [
"def",
"lows",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"market_open",
",",
"prev_dt",
",",
"dt_value",
",",
"entries",
"=",
"self",
".",
"_prelude",
"(",
"dt",
",",
"'low'",
")",
"lows",
"=",
"[",
"]",
"session_label",
"=",
"self",
".",
"_trading_calendar",
".",
"minute_to_session_label",
"(",
"dt",
")",
"for",
"asset",
"in",
"assets",
":",
"if",
"not",
"asset",
".",
"is_alive_for_session",
"(",
"session_label",
")",
":",
"lows",
".",
"append",
"(",
"np",
".",
"NaN",
")",
"continue",
"if",
"prev_dt",
"is",
"None",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'low'",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"lows",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"try",
":",
"last_visited_dt",
",",
"last_min",
"=",
"entries",
"[",
"asset",
"]",
"if",
"last_visited_dt",
"==",
"dt_value",
":",
"lows",
".",
"append",
"(",
"last_min",
")",
"continue",
"elif",
"last_visited_dt",
"==",
"prev_dt",
":",
"curr_val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'low'",
")",
"val",
"=",
"np",
".",
"nanmin",
"(",
"[",
"last_min",
",",
"curr_val",
"]",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"lows",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"after_last",
"=",
"pd",
".",
"Timestamp",
"(",
"last_visited_dt",
"+",
"self",
".",
"_one_min",
",",
"tz",
"=",
"'UTC'",
")",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'low'",
"]",
",",
"after_last",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
".",
"T",
"val",
"=",
"np",
".",
"nanmin",
"(",
"np",
".",
"append",
"(",
"window",
",",
"last_min",
")",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"lows",
".",
"append",
"(",
"val",
")",
"continue",
"except",
"KeyError",
":",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'low'",
"]",
",",
"market_open",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
".",
"T",
"val",
"=",
"np",
".",
"nanmin",
"(",
"window",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"lows",
".",
"append",
"(",
"val",
")",
"continue",
"return",
"np",
".",
"array",
"(",
"lows",
")"
] | The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt`, the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter. | [
"The",
"low",
"field",
"s",
"aggregation",
"returns",
"the",
"smallest",
"low",
"seen",
"between",
"the",
"market",
"open",
"and",
"the",
"current",
"dt",
".",
"If",
"there",
"has",
"been",
"no",
"data",
"on",
"or",
"before",
"the",
"dt",
"the",
"low",
"is",
"nan",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L308-L370 |
26,022 | quantopian/zipline | zipline/data/resample.py | DailyHistoryAggregator.closes | def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt`, the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
Returns the most recent non-nan close for the asset in this
session. If there has been no data in this session on or before the
`dt`, returns `nan`
"""
window = self._minute_reader.load_raw_arrays(
['close'],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes) | python | def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt`, the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
Returns the most recent non-nan close for the asset in this
session. If there has been no data in this session on or before the
`dt`, returns `nan`
"""
window = self._minute_reader.load_raw_arrays(
['close'],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes) | [
"def",
"closes",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"market_open",
",",
"prev_dt",
",",
"dt_value",
",",
"entries",
"=",
"self",
".",
"_prelude",
"(",
"dt",
",",
"'close'",
")",
"closes",
"=",
"[",
"]",
"session_label",
"=",
"self",
".",
"_trading_calendar",
".",
"minute_to_session_label",
"(",
"dt",
")",
"def",
"_get_filled_close",
"(",
"asset",
")",
":",
"\"\"\"\n Returns the most recent non-nan close for the asset in this\n session. If there has been no data in this session on or before the\n `dt`, returns `nan`\n \"\"\"",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'close'",
"]",
",",
"market_open",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
"try",
":",
"return",
"window",
"[",
"~",
"np",
".",
"isnan",
"(",
"window",
")",
"]",
"[",
"-",
"1",
"]",
"except",
"IndexError",
":",
"return",
"np",
".",
"NaN",
"for",
"asset",
"in",
"assets",
":",
"if",
"not",
"asset",
".",
"is_alive_for_session",
"(",
"session_label",
")",
":",
"closes",
".",
"append",
"(",
"np",
".",
"NaN",
")",
"continue",
"if",
"prev_dt",
"is",
"None",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'close'",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"closes",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"try",
":",
"last_visited_dt",
",",
"last_close",
"=",
"entries",
"[",
"asset",
"]",
"if",
"last_visited_dt",
"==",
"dt_value",
":",
"closes",
".",
"append",
"(",
"last_close",
")",
"continue",
"elif",
"last_visited_dt",
"==",
"prev_dt",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'close'",
")",
"if",
"pd",
".",
"isnull",
"(",
"val",
")",
":",
"val",
"=",
"last_close",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"closes",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'close'",
")",
"if",
"pd",
".",
"isnull",
"(",
"val",
")",
":",
"val",
"=",
"_get_filled_close",
"(",
"asset",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"closes",
".",
"append",
"(",
"val",
")",
"continue",
"except",
"KeyError",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'close'",
")",
"if",
"pd",
".",
"isnull",
"(",
"val",
")",
":",
"val",
"=",
"_get_filled_close",
"(",
"asset",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"closes",
".",
"append",
"(",
"val",
")",
"continue",
"return",
"np",
".",
"array",
"(",
"closes",
")"
] | The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt`, the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter. | [
"The",
"close",
"field",
"s",
"aggregation",
"returns",
"the",
"latest",
"close",
"at",
"the",
"given",
"dt",
".",
"If",
"the",
"close",
"for",
"the",
"given",
"dt",
"is",
"nan",
"the",
"most",
"recent",
"non",
"-",
"nan",
"close",
"is",
"used",
".",
"If",
"there",
"has",
"been",
"no",
"data",
"on",
"or",
"before",
"the",
"dt",
"the",
"close",
"is",
"nan",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L372-L446 |
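The forward-fill rule for closes reduces to "last non-nan value in the session window"; a sketch with invented data (the source's `_get_filled_close` does the same via an IndexError guard):

import numpy as np

def filled_close(window):
    # Last non-nan close so far in the session, or nan if none yet.
    nonnan = window[~np.isnan(window)]
    return nonnan[-1] if len(nonnan) else np.nan

print(filled_close(np.array([10.0, np.nan, 10.2, np.nan])))  # 10.2
print(filled_close(np.array([np.nan, np.nan])))              # nan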
26,023 | quantopian/zipline | zipline/data/resample.py | DailyHistoryAggregator.volumes | def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`.
If there has been no data on or before the `dt`, the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes) | python | def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`.
If there has been no data on or before the `dt`, the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes) | [
"def",
"volumes",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"market_open",
",",
"prev_dt",
",",
"dt_value",
",",
"entries",
"=",
"self",
".",
"_prelude",
"(",
"dt",
",",
"'volume'",
")",
"volumes",
"=",
"[",
"]",
"session_label",
"=",
"self",
".",
"_trading_calendar",
".",
"minute_to_session_label",
"(",
"dt",
")",
"for",
"asset",
"in",
"assets",
":",
"if",
"not",
"asset",
".",
"is_alive_for_session",
"(",
"session_label",
")",
":",
"volumes",
".",
"append",
"(",
"0",
")",
"continue",
"if",
"prev_dt",
"is",
"None",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'volume'",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"volumes",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"try",
":",
"last_visited_dt",
",",
"last_total",
"=",
"entries",
"[",
"asset",
"]",
"if",
"last_visited_dt",
"==",
"dt_value",
":",
"volumes",
".",
"append",
"(",
"last_total",
")",
"continue",
"elif",
"last_visited_dt",
"==",
"prev_dt",
":",
"val",
"=",
"self",
".",
"_minute_reader",
".",
"get_value",
"(",
"asset",
",",
"dt",
",",
"'volume'",
")",
"val",
"+=",
"last_total",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"volumes",
".",
"append",
"(",
"val",
")",
"continue",
"else",
":",
"after_last",
"=",
"pd",
".",
"Timestamp",
"(",
"last_visited_dt",
"+",
"self",
".",
"_one_min",
",",
"tz",
"=",
"'UTC'",
")",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'volume'",
"]",
",",
"after_last",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
"val",
"=",
"np",
".",
"nansum",
"(",
"window",
")",
"+",
"last_total",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"volumes",
".",
"append",
"(",
"val",
")",
"continue",
"except",
"KeyError",
":",
"window",
"=",
"self",
".",
"_minute_reader",
".",
"load_raw_arrays",
"(",
"[",
"'volume'",
"]",
",",
"market_open",
",",
"dt",
",",
"[",
"asset",
"]",
",",
")",
"[",
"0",
"]",
"val",
"=",
"np",
".",
"nansum",
"(",
"window",
")",
"entries",
"[",
"asset",
"]",
"=",
"(",
"dt_value",
",",
"val",
")",
"volumes",
".",
"append",
"(",
"val",
")",
"continue",
"return",
"np",
".",
"array",
"(",
"volumes",
")"
] | The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`.
If there has been no data on or before the `dt`, the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter. | [
"The",
"volume",
"field",
"s",
"aggregation",
"returns",
"the",
"sum",
"of",
"all",
"volumes",
"between",
"the",
"market",
"open",
"and",
"the",
"dt",
"If",
"there",
"has",
"been",
"no",
"data",
"on",
"or",
"before",
"the",
"dt",
"the",
"volume",
"is",
"0",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L448-L510 |
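All five aggregators share the same per-asset cache: a dict mapping asset to `(last_visited_dt, value)`, so each call only reads minutes after the last visit. A toy version with made-up volumes for one asset:

import numpy as np

minute_volumes = [100, 0, 250, 80]  # invented minutes for one asset
entries = {}                        # asset -> (last_minute_ix, total)

def cumulative_volume(asset, minute_ix):
    last_ix, last_total = entries.get(asset, (-1, 0))
    # Only sum the minutes that arrived since the last visit.
    total = last_total + np.nansum(minute_volumes[last_ix + 1:minute_ix + 1])
    entries[asset] = (minute_ix, total)
    return total

print(cumulative_volume(1, 1))  # 100 (minutes 0-1)
print(cumulative_volume(1, 3))  # 430, only minutes 2-3 re-read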
26,024 | quantopian/zipline | zipline/pipeline/domain.py | infer_domain | def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.term.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr)) | python | def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.term.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr)) | [
"def",
"infer_domain",
"(",
"terms",
")",
":",
"domains",
"=",
"{",
"t",
".",
"domain",
"for",
"t",
"in",
"terms",
"}",
"num_domains",
"=",
"len",
"(",
"domains",
")",
"if",
"num_domains",
"==",
"0",
":",
"return",
"GENERIC",
"elif",
"num_domains",
"==",
"1",
":",
"return",
"domains",
".",
"pop",
"(",
")",
"elif",
"num_domains",
"==",
"2",
"and",
"GENERIC",
"in",
"domains",
":",
"domains",
".",
"remove",
"(",
"GENERIC",
")",
"return",
"domains",
".",
"pop",
"(",
")",
"else",
":",
"# Remove GENERIC if it's present before raising. Showing it to the user",
"# is confusing because it doesn't contribute to the error.",
"domains",
".",
"discard",
"(",
"GENERIC",
")",
"raise",
"AmbiguousDomain",
"(",
"sorted",
"(",
"domains",
",",
"key",
"=",
"repr",
")",
")"
] | Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.term.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms. | [
"Infer",
"the",
"domain",
"from",
"a",
"collection",
"of",
"terms",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/domain.py#L274-L314 |
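A condensed sketch of the same inference rules, with plain strings standing in for zipline `Domain` objects:

GENERIC = 'GENERIC'

def infer(domains):
    concrete = set(domains) - {GENERIC}
    if not concrete:
        return GENERIC                 # all inputs were generic (or empty)
    if len(concrete) == 1:
        return concrete.pop()          # exactly one real domain wins
    raise ValueError('ambiguous domains: %s' % sorted(concrete))

print(infer([GENERIC, GENERIC]))        # GENERIC
print(infer([GENERIC, 'US_EQUITIES']))  # US_EQUITIES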
26,025 | quantopian/zipline | zipline/pipeline/domain.py | IDomain.roll_forward | def roll_forward(self, dt):
"""
Given a date, align it to the calendar of the pipeline's domain.
Parameters
----------
dt : pd.Timestamp
Returns
-------
pd.Timestamp
"""
dt = pd.Timestamp(dt, tz='UTC')
trading_days = self.all_sessions()
try:
return trading_days[trading_days.searchsorted(dt)]
except IndexError:
raise ValueError(
"Date {} was past the last session for domain {}. "
"The last session for this domain is {}.".format(
dt.date(),
self,
trading_days[-1].date()
)
) | python | def roll_forward(self, dt):
"""
Given a date, align it to the calendar of the pipeline's domain.
Parameters
----------
dt : pd.Timestamp
Returns
-------
pd.Timestamp
"""
dt = pd.Timestamp(dt, tz='UTC')
trading_days = self.all_sessions()
try:
return trading_days[trading_days.searchsorted(dt)]
except IndexError:
raise ValueError(
"Date {} was past the last session for domain {}. "
"The last session for this domain is {}.".format(
dt.date(),
self,
trading_days[-1].date()
)
) | [
"def",
"roll_forward",
"(",
"self",
",",
"dt",
")",
":",
"dt",
"=",
"pd",
".",
"Timestamp",
"(",
"dt",
",",
"tz",
"=",
"'UTC'",
")",
"trading_days",
"=",
"self",
".",
"all_sessions",
"(",
")",
"try",
":",
"return",
"trading_days",
"[",
"trading_days",
".",
"searchsorted",
"(",
"dt",
")",
"]",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"\"Date {} was past the last session for domain {}. \"",
"\"The last session for this domain is {}.\"",
".",
"format",
"(",
"dt",
".",
"date",
"(",
")",
",",
"self",
",",
"trading_days",
"[",
"-",
"1",
"]",
".",
"date",
"(",
")",
")",
")"
] | Given a date, align it to the calendar of the pipeline's domain.
Parameters
----------
dt : pd.Timestamp
Returns
-------
pd.Timestamp | [
"Given",
"a",
"date",
"align",
"it",
"to",
"the",
"calendar",
"of",
"the",
"pipeline",
"s",
"domain",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/domain.py#L77-L102 |
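The alignment rule is just `searchsorted` on the session index: take the first session on or after `dt`. A sketch with an invented three-session calendar:

import pandas as pd

sessions = pd.DatetimeIndex(
    ['2024-01-02', '2024-01-03', '2024-01-05'], tz='UTC'
)
dt = pd.Timestamp('2024-01-04', tz='UTC')

# First session on or after dt; an out-of-range dt raises IndexError,
# which roll_forward converts into the ValueError above.
print(sessions[sessions.searchsorted(dt)])  # 2024-01-05 00:00:00+00:00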
26,026 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | days_and_sids_for_frames | def days_and_sids_for_frames(frames):
"""
Returns the date index and sid columns shared by a list of dataframes,
ensuring they all match.
Parameters
----------
frames : list[pd.DataFrame]
A list of dataframes indexed by day, with a column per sid.
Returns
-------
days : np.array[datetime64[ns]]
The days in these dataframes.
sids : np.array[int64]
The sids in these dataframes.
Raises
------
ValueError
If the dataframes passed are not all indexed by the same days
and sids.
"""
if not frames:
days = np.array([], dtype='datetime64[ns]')
sids = np.array([], dtype='int64')
return days, sids
# Ensure the indices and columns all match.
check_indexes_all_same(
[frame.index for frame in frames],
message='Frames have mismatched days.',
)
check_indexes_all_same(
[frame.columns for frame in frames],
message='Frames have mismatched sids.',
)
return frames[0].index.values, frames[0].columns.values | python | def days_and_sids_for_frames(frames):
"""
Returns the date index and sid columns shared by a list of dataframes,
ensuring they all match.
Parameters
----------
frames : list[pd.DataFrame]
A list of dataframes indexed by day, with a column per sid.
Returns
-------
days : np.array[datetime64[ns]]
The days in these dataframes.
sids : np.array[int64]
The sids in these dataframes.
Raises
------
ValueError
If the dataframes passed are not all indexed by the same days
and sids.
"""
if not frames:
days = np.array([], dtype='datetime64[ns]')
sids = np.array([], dtype='int64')
return days, sids
# Ensure the indices and columns all match.
check_indexes_all_same(
[frame.index for frame in frames],
message='Frames have mismatched days.',
)
check_indexes_all_same(
[frame.columns for frame in frames],
message='Frames have mismatched sids.',
)
return frames[0].index.values, frames[0].columns.values | [
"def",
"days_and_sids_for_frames",
"(",
"frames",
")",
":",
"if",
"not",
"frames",
":",
"days",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'datetime64[ns]'",
")",
"sids",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'int64'",
")",
"return",
"days",
",",
"sids",
"# Ensure the indices and columns all match.",
"check_indexes_all_same",
"(",
"[",
"frame",
".",
"index",
"for",
"frame",
"in",
"frames",
"]",
",",
"message",
"=",
"'Frames have mistmatched days.'",
",",
")",
"check_indexes_all_same",
"(",
"[",
"frame",
".",
"columns",
"for",
"frame",
"in",
"frames",
"]",
",",
"message",
"=",
"'Frames have mismatched sids.'",
",",
")",
"return",
"frames",
"[",
"0",
"]",
".",
"index",
".",
"values",
",",
"frames",
"[",
"0",
"]",
".",
"columns",
".",
"values"
] | Returns the date index and sid columns shared by a list of dataframes,
ensuring they all match.
Parameters
----------
frames : list[pd.DataFrame]
A list of dataframes indexed by day, with a column per sid.
Returns
-------
days : np.array[datetime64[ns]]
The days in these dataframes.
sids : np.array[int64]
The sids in these dataframes.
Raises
------
ValueError
If the dataframes passed are not all indexed by the same days
and sids. | [
"Returns",
"the",
"date",
"index",
"and",
"sid",
"columns",
"shared",
"by",
"a",
"list",
"of",
"dataframes",
"ensuring",
"they",
"all",
"match",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L154-L192 |
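A sketch of the invariant this enforces (my own check, standing in for `check_indexes_all_same`): every OHLCV frame must agree on its index (days) and columns (sids):

import pandas as pd

days = pd.date_range('2024-01-02', periods=2)
frames = [
    pd.DataFrame(1.0, index=days, columns=[1, 2]),  # e.g. 'open'
    pd.DataFrame(2.0, index=days, columns=[1, 2]),  # e.g. 'close'
]
# Every frame must share the first frame's days and sids exactly.
assert all(f.index.equals(frames[0].index) for f in frames)
assert all(f.columns.equals(frames[0].columns) for f in frames)
print(frames[0].index.values, frames[0].columns.values)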
26,027 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | HDF5DailyBarWriter.write | def write(self, country_code, frames, scaling_factors=None):
"""Write the OHLCV data for one country to the HDF5 file.
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row
for each date and a column for each sid. The dataframes need
to have the same index and columns.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of the field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
if scaling_factors is None:
scaling_factors = DEFAULT_SCALING_FACTORS
with self.h5_file(mode='a') as h5_file:
# ensure that the file version has been written
h5_file.attrs['version'] = VERSION
country_group = h5_file.create_group(country_code)
data_group = country_group.create_group(DATA)
index_group = country_group.create_group(INDEX)
lifetimes_group = country_group.create_group(LIFETIMES)
# Note that this function validates that all of the frames
# share the same days and sids.
days, sids = days_and_sids_for_frames(list(frames.values()))
# Write sid and date indices.
index_group.create_dataset(SID, data=sids)
# h5py does not support datetimes, so they need to be stored
# as integers.
index_group.create_dataset(DAY, data=days.astype(np.int64))
log.debug(
'Wrote {} group to file {}',
index_group.name,
self._filename,
)
# Write start and end dates for each sid.
start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames)
lifetimes_group.create_dataset(START_DATE, data=start_date_ixs)
lifetimes_group.create_dataset(END_DATE, data=end_date_ixs)
if len(sids):
chunks = (len(sids), min(self._date_chunk_size, len(days)))
else:
# h5py crashes if we provide chunks for empty data.
chunks = None
for field in FIELDS:
frame = frames[field]
# Sort rows by increasing sid, and columns by increasing date.
frame.sort_index(inplace=True)
frame.sort_index(axis='columns', inplace=True)
data = coerce_to_uint32(
frame.T.fillna(0).values,
scaling_factors[field],
)
dataset = data_group.create_dataset(
field,
compression='lzf',
shuffle=True,
data=data,
chunks=chunks,
)
dataset.attrs[SCALING_FACTOR] = scaling_factors[field]
log.debug(
'Writing dataset {} to file {}',
dataset.name, self._filename
) | python | def write(self, country_code, frames, scaling_factors=None):
"""Write the OHLCV data for one country to the HDF5 file.
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row
for each date and a column for each sid. The dataframes need
to have the same index and columns.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of the field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
if scaling_factors is None:
scaling_factors = DEFAULT_SCALING_FACTORS
with self.h5_file(mode='a') as h5_file:
# ensure that the file version has been written
h5_file.attrs['version'] = VERSION
country_group = h5_file.create_group(country_code)
data_group = country_group.create_group(DATA)
index_group = country_group.create_group(INDEX)
lifetimes_group = country_group.create_group(LIFETIMES)
# Note that this function validates that all of the frames
# share the same days and sids.
days, sids = days_and_sids_for_frames(list(frames.values()))
# Write sid and date indices.
index_group.create_dataset(SID, data=sids)
# h5py does not support datetimes, so they need to be stored
# as integers.
index_group.create_dataset(DAY, data=days.astype(np.int64))
log.debug(
'Wrote {} group to file {}',
index_group.name,
self._filename,
)
# Write start and end dates for each sid.
start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames)
lifetimes_group.create_dataset(START_DATE, data=start_date_ixs)
lifetimes_group.create_dataset(END_DATE, data=end_date_ixs)
if len(sids):
chunks = (len(sids), min(self._date_chunk_size, len(days)))
else:
# h5py crashes if we provide chunks for empty data.
chunks = None
for field in FIELDS:
frame = frames[field]
# Sort rows by increasing sid, and columns by increasing date.
frame.sort_index(inplace=True)
frame.sort_index(axis='columns', inplace=True)
data = coerce_to_uint32(
frame.T.fillna(0).values,
scaling_factors[field],
)
dataset = data_group.create_dataset(
field,
compression='lzf',
shuffle=True,
data=data,
chunks=chunks,
)
dataset.attrs[SCALING_FACTOR] = scaling_factors[field]
log.debug(
'Writing dataset {} to file {}',
dataset.name, self._filename
) | [
"def",
"write",
"(",
"self",
",",
"country_code",
",",
"frames",
",",
"scaling_factors",
"=",
"None",
")",
":",
"if",
"scaling_factors",
"is",
"None",
":",
"scaling_factors",
"=",
"DEFAULT_SCALING_FACTORS",
"with",
"self",
".",
"h5_file",
"(",
"mode",
"=",
"'a'",
")",
"as",
"h5_file",
":",
"# ensure that the file version has been written",
"h5_file",
".",
"attrs",
"[",
"'version'",
"]",
"=",
"VERSION",
"country_group",
"=",
"h5_file",
".",
"create_group",
"(",
"country_code",
")",
"data_group",
"=",
"country_group",
".",
"create_group",
"(",
"DATA",
")",
"index_group",
"=",
"country_group",
".",
"create_group",
"(",
"INDEX",
")",
"lifetimes_group",
"=",
"country_group",
".",
"create_group",
"(",
"LIFETIMES",
")",
"# Note that this functions validates that all of the frames",
"# share the same days and sids.",
"days",
",",
"sids",
"=",
"days_and_sids_for_frames",
"(",
"list",
"(",
"frames",
".",
"values",
"(",
")",
")",
")",
"# Write sid and date indices.",
"index_group",
".",
"create_dataset",
"(",
"SID",
",",
"data",
"=",
"sids",
")",
"# h5py does not support datetimes, so they need to be stored",
"# as integers.",
"index_group",
".",
"create_dataset",
"(",
"DAY",
",",
"data",
"=",
"days",
".",
"astype",
"(",
"np",
".",
"int64",
")",
")",
"log",
".",
"debug",
"(",
"'Wrote {} group to file {}'",
",",
"index_group",
".",
"name",
",",
"self",
".",
"_filename",
",",
")",
"# Write start and end dates for each sid.",
"start_date_ixs",
",",
"end_date_ixs",
"=",
"compute_asset_lifetimes",
"(",
"frames",
")",
"lifetimes_group",
".",
"create_dataset",
"(",
"START_DATE",
",",
"data",
"=",
"start_date_ixs",
")",
"lifetimes_group",
".",
"create_dataset",
"(",
"END_DATE",
",",
"data",
"=",
"end_date_ixs",
")",
"if",
"len",
"(",
"sids",
")",
":",
"chunks",
"=",
"(",
"len",
"(",
"sids",
")",
",",
"min",
"(",
"self",
".",
"_date_chunk_size",
",",
"len",
"(",
"days",
")",
")",
")",
"else",
":",
"# h5py crashes if we provide chunks for empty data.",
"chunks",
"=",
"None",
"for",
"field",
"in",
"FIELDS",
":",
"frame",
"=",
"frames",
"[",
"field",
"]",
"# Sort rows by increasing sid, and columns by increasing date.",
"frame",
".",
"sort_index",
"(",
"inplace",
"=",
"True",
")",
"frame",
".",
"sort_index",
"(",
"axis",
"=",
"'columns'",
",",
"inplace",
"=",
"True",
")",
"data",
"=",
"coerce_to_uint32",
"(",
"frame",
".",
"T",
".",
"fillna",
"(",
"0",
")",
".",
"values",
",",
"scaling_factors",
"[",
"field",
"]",
",",
")",
"dataset",
"=",
"data_group",
".",
"create_dataset",
"(",
"field",
",",
"compression",
"=",
"'lzf'",
",",
"shuffle",
"=",
"True",
",",
"data",
"=",
"data",
",",
"chunks",
"=",
"chunks",
",",
")",
"dataset",
".",
"attrs",
"[",
"SCALING_FACTOR",
"]",
"=",
"scaling_factors",
"[",
"field",
"]",
"log",
".",
"debug",
"(",
"'Writing dataset {} to file {}'",
",",
"dataset",
".",
"name",
",",
"self",
".",
"_filename",
")"
] | Write the OHLCV data for one country to the HDF5 file.
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row
for each date and a column for each sid. The dataframes need
to have the same index and columns.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of the field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used. | [
"Write",
"the",
"OHLCV",
"data",
"for",
"one",
"country",
"to",
"the",
"HDF5",
"file",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L220-L307 |
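A sketch of the uint32 scaling trick the docstring describes; `coerce_to_uint32`'s implementation isn't shown here, so this is an assumption about its behavior (multiply, round, cast), not a copy of it:

import numpy as np

def pack(values, scaling_factor):
    # Multiply by the per-field factor and round into uint32;
    # the reader divides the factor back out on load.
    return (values * scaling_factor).round().astype(np.uint32)

prices = np.array([10.001, 10.002, 0.0])  # 0.0 filled in for nan
packed = pack(prices, 1000)
print(packed)           # [10001 10002     0]
print(packed / 1000.0)  # recovers three decimal places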
26,028 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | HDF5DailyBarReader.from_file | def from_file(cls, h5_file, country_code):
"""
Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
if h5_file.attrs['version'] != VERSION:
raise ValueError(
'mismatched version: file is of version %s, expected %s' % (
h5_file.attrs['version'],
VERSION,
),
)
return cls(h5_file[country_code]) | python | def from_file(cls, h5_file, country_code):
"""
Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
if h5_file.attrs['version'] != VERSION:
raise ValueError(
'mismatched version: file is of version %s, expected %s' % (
h5_file.attrs['version'],
VERSION,
),
)
return cls(h5_file[country_code]) | [
"def",
"from_file",
"(",
"cls",
",",
"h5_file",
",",
"country_code",
")",
":",
"if",
"h5_file",
".",
"attrs",
"[",
"'version'",
"]",
"!=",
"VERSION",
":",
"raise",
"ValueError",
"(",
"'mismatched version: file is of version %s, expected %s'",
"%",
"(",
"h5_file",
".",
"attrs",
"[",
"'version'",
"]",
",",
"VERSION",
",",
")",
",",
")",
"return",
"cls",
"(",
"h5_file",
"[",
"country_code",
"]",
")"
] | Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read. | [
"Construct",
"from",
"an",
"h5py",
".",
"File",
"and",
"a",
"country",
"code",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L424-L443 |
26,029 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | HDF5DailyBarReader.from_path | def from_path(cls, path, country_code):
"""
Construct from a file path and a country code.
Parameters
----------
path : str
The path to an HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
return cls.from_file(h5py.File(path), country_code) | python | def from_path(cls, path, country_code):
"""
Construct from a file path and a country code.
Parameters
----------
path : str
The path to an HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
return cls.from_file(h5py.File(path), country_code) | [
"def",
"from_path",
"(",
"cls",
",",
"path",
",",
"country_code",
")",
":",
"return",
"cls",
".",
"from_file",
"(",
"h5py",
".",
"File",
"(",
"path",
")",
",",
"country_code",
")"
] | Construct from a file path and a country code.
Parameters
----------
path : str
The path to an HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read. | [
"Construct",
"from",
"a",
"file",
"path",
"and",
"a",
"country",
"code",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L446-L457 |
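A sketch of the version-guarded open that these two constructors amount to; `EXPECTED_VERSION` and the read mode are placeholders of mine, not zipline constants:

import h5py

EXPECTED_VERSION = 0  # placeholder; the real check uses the module's VERSION

def open_checked(path):
    f = h5py.File(path, 'r')
    if f.attrs['version'] != EXPECTED_VERSION:
        raise ValueError(
            'mismatched version: file is of version %s, expected %s'
            % (f.attrs['version'], EXPECTED_VERSION)
        )
    return f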
26,030 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | HDF5DailyBarReader._make_sid_selector | def _make_sid_selector(self, assets):
"""
Build an indexer mapping ``self.sids`` to ``assets``.
Parameters
----------
assets : list[int]
List of assets requested by a caller of ``load_raw_arrays``.
Returns
-------
index : np.array[int64]
Index array containing the index in ``self.sids`` for each location
in ``assets``. Entries in ``assets`` for which we don't have a sid
will contain -1. It is caller's responsibility to handle these
values correctly.
"""
assets = np.array(assets)
sid_selector = self.sids.searchsorted(assets)
unknown = np.in1d(assets, self.sids, invert=True)
sid_selector[unknown] = -1
return sid_selector | python | def _make_sid_selector(self, assets):
"""
Build an indexer mapping ``self.sids`` to ``assets``.
Parameters
----------
assets : list[int]
List of assets requested by a caller of ``load_raw_arrays``.
Returns
-------
index : np.array[int64]
Index array containing the index in ``self.sids`` for each location
in ``assets``. Entries in ``assets`` for which we don't have a sid
will contain -1. It is the caller's responsibility to handle these
values correctly.
"""
assets = np.array(assets)
sid_selector = self.sids.searchsorted(assets)
unknown = np.in1d(assets, self.sids, invert=True)
sid_selector[unknown] = -1
return sid_selector | [
"def",
"_make_sid_selector",
"(",
"self",
",",
"assets",
")",
":",
"assets",
"=",
"np",
".",
"array",
"(",
"assets",
")",
"sid_selector",
"=",
"self",
".",
"sids",
".",
"searchsorted",
"(",
"assets",
")",
"unknown",
"=",
"np",
".",
"in1d",
"(",
"assets",
",",
"self",
".",
"sids",
",",
"invert",
"=",
"True",
")",
"sid_selector",
"[",
"unknown",
"]",
"=",
"-",
"1",
"return",
"sid_selector"
] | Build an indexer mapping ``self.sids`` to ``assets``.
Parameters
----------
assets : list[int]
List of assets requested by a caller of ``load_raw_arrays``.
Returns
-------
index : np.array[int64]
Index array containing the index in ``self.sids`` for each location
in ``assets``. Entries in ``assets`` for which we don't have a sid
will contain -1. It is caller's responsibility to handle these
values correctly. | [
"Build",
"an",
"indexer",
"mapping",
"self",
".",
"sids",
"to",
"assets",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L530-L551 |
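The indexer construction in two numpy calls, on invented sids (the reader's `self.sids` come from a sorted index, which `searchsorted` requires):

import numpy as np

sids = np.array([1, 3, 5])    # sorted sids present in the file
assets = np.array([3, 4, 5])  # caller's request; 4 is unknown

selector = sids.searchsorted(assets)
# Unknown assets would otherwise get an arbitrary insertion point,
# so mark them with the -1 sentinel the docstring promises.
selector[np.in1d(assets, sids, invert=True)] = -1
print(selector)  # [ 1 -1  2]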
26,031 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | HDF5DailyBarReader._validate_assets | def _validate_assets(self, assets):
"""Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars.
"""
missing_sids = np.setdiff1d(assets, self.sids)
if len(missing_sids):
raise NoDataForSid(
'Assets not contained in daily pricing file: {}'.format(
missing_sids
)
) | python | def _validate_assets(self, assets):
"""Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars.
"""
missing_sids = np.setdiff1d(assets, self.sids)
if len(missing_sids):
raise NoDataForSid(
'Assets not contained in daily pricing file: {}'.format(
missing_sids
)
) | [
"def",
"_validate_assets",
"(",
"self",
",",
"assets",
")",
":",
"missing_sids",
"=",
"np",
".",
"setdiff1d",
"(",
"assets",
",",
"self",
".",
"sids",
")",
"if",
"len",
"(",
"missing_sids",
")",
":",
"raise",
"NoDataForSid",
"(",
"'Assets not contained in daily pricing file: {}'",
".",
"format",
"(",
"missing_sids",
")",
")"
] | Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars. | [
"Validate",
"that",
"asset",
"identifiers",
"are",
"contained",
"in",
"the",
"daily",
"bars",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L562-L583 |
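The whole check is one `setdiff1d`; a sketch with invented sids:

import numpy as np

known_sids = np.array([1, 2, 3])
requested = [2, 3, 9]

missing = np.setdiff1d(requested, known_sids)
print(missing)  # [9] -- a non-empty result triggers NoDataForSid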
26,032 | quantopian/zipline | zipline/data/hdf5_daily_bars.py | MultiCountryDailyBarReader.from_file | def from_file(cls, h5_file):
"""
Construct from an h5py.File.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
"""
return cls({
country: HDF5DailyBarReader.from_file(h5_file, country)
for country in h5_file.keys()
}) | python | def from_file(cls, h5_file):
"""
Construct from an h5py.File.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
"""
return cls({
country: HDF5DailyBarReader.from_file(h5_file, country)
for country in h5_file.keys()
}) | [
"def",
"from_file",
"(",
"cls",
",",
"h5_file",
")",
":",
"return",
"cls",
"(",
"{",
"country",
":",
"HDF5DailyBarReader",
".",
"from_file",
"(",
"h5_file",
",",
"country",
")",
"for",
"country",
"in",
"h5_file",
".",
"keys",
"(",
")",
"}",
")"
] | Construct from an h5py.File.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file. | [
"Construct",
"from",
"an",
"h5py",
".",
"File",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L745-L757 |
26,033 | quantopian/zipline | zipline/assets/asset_writer.py | _normalize_index_columns_in_place | def _normalize_index_columns_in_place(equities,
equity_supplementary_mappings,
futures,
exchanges,
root_symbols):
"""
Update dataframes in place to set identifier columns as indices.
For each input frame, if the frame has a column with the same name as its
associated index column, set that column as the index.
Otherwise, assume the index already contains identifiers.
If frames are passed as None, they're ignored.
"""
for frame, column_name in ((equities, 'sid'),
(equity_supplementary_mappings, 'sid'),
(futures, 'sid'),
(exchanges, 'exchange'),
(root_symbols, 'root_symbol')):
if frame is not None and column_name in frame:
frame.set_index(column_name, inplace=True) | python | def _normalize_index_columns_in_place(equities,
equity_supplementary_mappings,
futures,
exchanges,
root_symbols):
"""
Update dataframes in place to set identifier columns as indices.
For each input frame, if the frame has a column with the same name as its
associated index column, set that column as the index.
Otherwise, assume the index already contains identifiers.
If frames are passed as None, they're ignored.
"""
for frame, column_name in ((equities, 'sid'),
(equity_supplementary_mappings, 'sid'),
(futures, 'sid'),
(exchanges, 'exchange'),
(root_symbols, 'root_symbol')):
if frame is not None and column_name in frame:
frame.set_index(column_name, inplace=True) | [
"def",
"_normalize_index_columns_in_place",
"(",
"equities",
",",
"equity_supplementary_mappings",
",",
"futures",
",",
"exchanges",
",",
"root_symbols",
")",
":",
"for",
"frame",
",",
"column_name",
"in",
"(",
"(",
"equities",
",",
"'sid'",
")",
",",
"(",
"equity_supplementary_mappings",
",",
"'sid'",
")",
",",
"(",
"futures",
",",
"'sid'",
")",
",",
"(",
"exchanges",
",",
"'exchange'",
")",
",",
"(",
"root_symbols",
",",
"'root_symbol'",
")",
")",
":",
"if",
"frame",
"is",
"not",
"None",
"and",
"column_name",
"in",
"frame",
":",
"frame",
".",
"set_index",
"(",
"column_name",
",",
"inplace",
"=",
"True",
")"
] | Update dataframes in place to set identifier columns as indices.
For each input frame, if the frame has a column with the same name as its
associated index column, set that column as the index.
Otherwise, assume the index already contains identifiers.
If frames are passed as None, they're ignored. | [
"Update",
"dataframes",
"in",
"place",
"to",
"set",
"indentifier",
"columns",
"as",
"indices",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L74-L95 |
26,034 | quantopian/zipline | zipline/assets/asset_writer.py | split_delimited_symbol | def split_delimited_symbol(symbol):
"""
Takes in a symbol that may be delimited and splits it into a company
symbol and share class symbol.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
company_symbol : str
The company part of the symbol.
share_class_symbol : str
The share class part of a symbol.
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return '', ''
symbol = symbol.upper()
split_list = re.split(
pattern=_delimited_symbol_delimiters_regex,
string=symbol,
maxsplit=1,
)
# Break the list up into its two components, the company symbol and the
# share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
return company_symbol, share_class_symbol | python | def split_delimited_symbol(symbol):
"""
Takes in a symbol that may be delimited and splits it into a company
symbol and share class symbol.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
company_symbol : str
The company part of the symbol.
share_class_symbol : str
The share class part of a symbol.
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return '', ''
symbol = symbol.upper()
split_list = re.split(
pattern=_delimited_symbol_delimiters_regex,
string=symbol,
maxsplit=1,
)
# Break the list up into its two components, the company symbol and the
# share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
return company_symbol, share_class_symbol | [
"def",
"split_delimited_symbol",
"(",
"symbol",
")",
":",
"# return blank strings for any bad fuzzy symbols, like NaN or None",
"if",
"symbol",
"in",
"_delimited_symbol_default_triggers",
":",
"return",
"''",
",",
"''",
"symbol",
"=",
"symbol",
".",
"upper",
"(",
")",
"split_list",
"=",
"re",
".",
"split",
"(",
"pattern",
"=",
"_delimited_symbol_delimiters_regex",
",",
"string",
"=",
"symbol",
",",
"maxsplit",
"=",
"1",
",",
")",
"# Break the list up in to its two components, the company symbol and the",
"# share class symbol",
"company_symbol",
"=",
"split_list",
"[",
"0",
"]",
"if",
"len",
"(",
"split_list",
")",
">",
"1",
":",
"share_class_symbol",
"=",
"split_list",
"[",
"1",
"]",
"else",
":",
"share_class_symbol",
"=",
"''",
"return",
"company_symbol",
",",
"share_class_symbol"
] | Takes in a symbol that may be delimited and splits it into a company
symbol and share class symbol.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
company_symbol : str
The company part of the symbol.
share_class_symbol : str
The share class part of a symbol. | [
"Takes",
"in",
"a",
"symbol",
"that",
"may",
"be",
"delimited",
"and",
"splits",
"it",
"in",
"to",
"a",
"company",
"symbol",
"and",
"share",
"class",
"symbol",
".",
"Also",
"returns",
"the",
"fuzzy",
"symbol",
"which",
"is",
"the",
"symbol",
"without",
"any",
"fuzzy",
"characters",
"at",
"all",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L175-L213 |
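A sketch of the split; the delimiter set here is my assumption, since `_delimited_symbol_delimiters_regex` is built elsewhere from a configured character list:

import re

DELIMITERS = re.compile(r'[./\-_]')  # assumed delimiter characters

def split_symbol(symbol):
    parts = DELIMITERS.split(symbol.upper(), maxsplit=1)
    return parts[0], parts[1] if len(parts) > 1 else ''

print(split_symbol('brk.a'))  # ('BRK', 'A')
print(split_symbol('AAPL'))   # ('AAPL', '')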
26,035 | quantopian/zipline | zipline/assets/asset_writer.py | _generate_output_dataframe | def _generate_output_dataframe(data_subset, defaults):
"""
Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are a function from dataframe and
column name to the default values to insert in the DataFrame if no user
data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
for col in desired_cols - cols:
# write the default value for any missing columns
data_subset[col] = defaults[col](data_subset, col)
return data_subset | python | def _generate_output_dataframe(data_subset, defaults):
"""
Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are a function from dataframe and
column name to the default values to insert in the DataFrame if no user
data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
for col in desired_cols - cols:
# write the default value for any missing columns
data_subset[col] = defaults[col](data_subset, col)
return data_subset | [
"def",
"_generate_output_dataframe",
"(",
"data_subset",
",",
"defaults",
")",
":",
"# The columns provided.",
"cols",
"=",
"set",
"(",
"data_subset",
".",
"columns",
")",
"desired_cols",
"=",
"set",
"(",
"defaults",
")",
"# Drop columns with unrecognised headers.",
"data_subset",
".",
"drop",
"(",
"cols",
"-",
"desired_cols",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"# Get those columns which we need but",
"# for which no data has been supplied.",
"for",
"col",
"in",
"desired_cols",
"-",
"cols",
":",
"# write the default value for any missing columns",
"data_subset",
"[",
"col",
"]",
"=",
"defaults",
"[",
"col",
"]",
"(",
"data_subset",
",",
"col",
")",
"return",
"data_subset"
] | Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are a function from dataframe and
column name to the default values to insert in the DataFrame if no user
data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing | [
"Generates",
"an",
"output",
"dataframe",
"from",
"the",
"given",
"subset",
"of",
"user",
"-",
"provided",
"data",
"the",
"given",
"column",
"names",
"and",
"the",
"given",
"default",
"values",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L216-L254 |
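To make the defaulting behavior concrete, here is a self-contained sketch that mirrors the logic of the row above using plain pandas; the column names and default callables are invented for illustration:

```python
import pandas as pd

df = pd.DataFrame({'symbol': ['A', 'B'], 'junk': [1, 2]})
defaults = {
    'symbol': lambda frame, col: '',
    'asset_name': lambda frame, col: 'unknown',  # invented default
}

# Drop unrecognised columns, then fill in the missing desired ones.
df.drop(list(set(df.columns) - set(defaults)), axis=1, inplace=True)
for col in set(defaults) - set(df.columns):
    df[col] = defaults[col](df, col)
print(df)  # columns: symbol, asset_name; 'junk' is gone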
26,036 | quantopian/zipline | zipline/assets/asset_writer.py | _check_symbol_mappings | def _check_symbol_mappings(df, exchanges, asset_exchange):
"""Check that there are no cases where multiple symbols resolve to the same
asset at the same time in the same country.
Parameters
----------
df : pd.DataFrame
The equity symbol mappings table.
exchanges : pd.DataFrame
The exchanges table.
asset_exchange : pd.Series
A series that maps sids to the exchange the asset is in.
Raises
------
ValueError
Raised when there are ambiguous symbol mappings.
"""
mappings = df.set_index('sid')[list(mapping_columns)].copy()
mappings['country_code'] = exchanges['country_code'][
asset_exchange.loc[df['sid']]
].values
ambigious = {}
def check_intersections(persymbol):
intersections = list(intersecting_ranges(map(
from_tuple,
zip(persymbol.start_date, persymbol.end_date),
)))
if intersections:
data = persymbol[
['start_date', 'end_date']
].astype('datetime64[ns]')
# indent the dataframe string, also compute this early because
# ``persymbol`` is a view and ``astype`` doesn't copy the index
# correctly in pandas 0.22
msg_component = '\n '.join(str(data).splitlines())
ambigious[persymbol.name] = intersections, msg_component
mappings.groupby(['symbol', 'country_code']).apply(check_intersections)
if ambigious:
raise ValueError(
'Ambiguous ownership for %d symbol%s, multiple assets held the'
' following symbols:\n%s' % (
len(ambigious),
'' if len(ambigious) == 1 else 's',
'\n'.join(
'%s (%s):\n intersections: %s\n %s' % (
symbol,
country_code,
tuple(map(_format_range, intersections)),
cs,
)
for (symbol, country_code), (intersections, cs) in sorted(
ambigious.items(),
key=first,
),
),
)
) | python | def _check_symbol_mappings(df, exchanges, asset_exchange):
"""Check that there are no cases where multiple symbols resolve to the same
asset at the same time in the same country.
Parameters
----------
df : pd.DataFrame
The equity symbol mappings table.
exchanges : pd.DataFrame
The exchanges table.
asset_exchange : pd.Series
A series that maps sids to the exchange the asset is in.
Raises
------
ValueError
Raised when there are ambiguous symbol mappings.
"""
mappings = df.set_index('sid')[list(mapping_columns)].copy()
mappings['country_code'] = exchanges['country_code'][
asset_exchange.loc[df['sid']]
].values
ambigious = {}
def check_intersections(persymbol):
intersections = list(intersecting_ranges(map(
from_tuple,
zip(persymbol.start_date, persymbol.end_date),
)))
if intersections:
data = persymbol[
['start_date', 'end_date']
].astype('datetime64[ns]')
# indent the dataframe string, also compute this early because
# ``persymbol`` is a view and ``astype`` doesn't copy the index
# correctly in pandas 0.22
msg_component = '\n '.join(str(data).splitlines())
ambigious[persymbol.name] = intersections, msg_component
mappings.groupby(['symbol', 'country_code']).apply(check_intersections)
if ambigious:
raise ValueError(
'Ambiguous ownership for %d symbol%s, multiple assets held the'
' following symbols:\n%s' % (
len(ambigious),
'' if len(ambigious) == 1 else 's',
'\n'.join(
'%s (%s):\n intersections: %s\n %s' % (
symbol,
country_code,
tuple(map(_format_range, intersections)),
cs,
)
for (symbol, country_code), (intersections, cs) in sorted(
ambigious.items(),
key=first,
),
),
)
) | [
"def",
"_check_symbol_mappings",
"(",
"df",
",",
"exchanges",
",",
"asset_exchange",
")",
":",
"mappings",
"=",
"df",
".",
"set_index",
"(",
"'sid'",
")",
"[",
"list",
"(",
"mapping_columns",
")",
"]",
".",
"copy",
"(",
")",
"mappings",
"[",
"'country_code'",
"]",
"=",
"exchanges",
"[",
"'country_code'",
"]",
"[",
"asset_exchange",
".",
"loc",
"[",
"df",
"[",
"'sid'",
"]",
"]",
"]",
".",
"values",
"ambigious",
"=",
"{",
"}",
"def",
"check_intersections",
"(",
"persymbol",
")",
":",
"intersections",
"=",
"list",
"(",
"intersecting_ranges",
"(",
"map",
"(",
"from_tuple",
",",
"zip",
"(",
"persymbol",
".",
"start_date",
",",
"persymbol",
".",
"end_date",
")",
",",
")",
")",
")",
"if",
"intersections",
":",
"data",
"=",
"persymbol",
"[",
"[",
"'start_date'",
",",
"'end_date'",
"]",
"]",
".",
"astype",
"(",
"'datetime64[ns]'",
")",
"# indent the dataframe string, also compute this early because",
"# ``persymbol`` is a view and ``astype`` doesn't copy the index",
"# correctly in pandas 0.22",
"msg_component",
"=",
"'\\n '",
".",
"join",
"(",
"str",
"(",
"data",
")",
".",
"splitlines",
"(",
")",
")",
"ambigious",
"[",
"persymbol",
".",
"name",
"]",
"=",
"intersections",
",",
"msg_component",
"mappings",
".",
"groupby",
"(",
"[",
"'symbol'",
",",
"'country_code'",
"]",
")",
".",
"apply",
"(",
"check_intersections",
")",
"if",
"ambigious",
":",
"raise",
"ValueError",
"(",
"'Ambiguous ownership for %d symbol%s, multiple assets held the'",
"' following symbols:\\n%s'",
"%",
"(",
"len",
"(",
"ambigious",
")",
",",
"''",
"if",
"len",
"(",
"ambigious",
")",
"==",
"1",
"else",
"'s'",
",",
"'\\n'",
".",
"join",
"(",
"'%s (%s):\\n intersections: %s\\n %s'",
"%",
"(",
"symbol",
",",
"country_code",
",",
"tuple",
"(",
"map",
"(",
"_format_range",
",",
"intersections",
")",
")",
",",
"cs",
",",
")",
"for",
"(",
"symbol",
",",
"country_code",
")",
",",
"(",
"intersections",
",",
"cs",
")",
"in",
"sorted",
"(",
"ambigious",
".",
"items",
"(",
")",
",",
"key",
"=",
"first",
",",
")",
",",
")",
",",
")",
")"
] | Check that there are no cases where multiple symbols resolve to the same
asset at the same time in the same country.
Parameters
----------
df : pd.DataFrame
The equity symbol mappings table.
exchanges : pd.DataFrame
The exchanges table.
asset_exchange : pd.Series
A series that maps sids to the exchange the asset is in.
Raises
------
ValueError
Raised when there are ambiguous symbol mappings. | [
"Check",
"that",
"there",
"are",
"no",
"cases",
"where",
"multiple",
"symbols",
"resolve",
"to",
"the",
"same",
"asset",
"at",
"the",
"same",
"time",
"in",
"the",
"same",
"country",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L272-L332 |
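The ambiguity test reduces to interval intersection. A toy sketch, under the assumption that start/end dates behave like inclusive integer endpoints (which is how the `from_tuple`/`overlap` helpers later in this section treat them):

```python
# Two sids claim the same symbol over overlapping date windows.
windows = {1: (0, 10), 2: (5, 15)}           # sid -> (start_date, end_date)
a, b = (range(start, stop) for start, stop in windows.values())
if a.stop >= b.start and b.stop >= a.start:  # the overlap() predicate
    print('ambiguous ownership between sids 1 and 2')
```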
26,037 | quantopian/zipline | zipline/assets/asset_writer.py | _dt_to_epoch_ns | def _dt_to_epoch_ns(dt_series):
"""Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64) | python | def _dt_to_epoch_ns(dt_series):
"""Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64) | [
"def",
"_dt_to_epoch_ns",
"(",
"dt_series",
")",
":",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"dt_series",
".",
"values",
")",
"if",
"index",
".",
"tzinfo",
"is",
"None",
":",
"index",
"=",
"index",
".",
"tz_localize",
"(",
"'UTC'",
")",
"else",
":",
"index",
"=",
"index",
".",
"tz_convert",
"(",
"'UTC'",
")",
"return",
"index",
".",
"view",
"(",
"np",
".",
"int64",
")"
] | Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch. | [
"Convert",
"a",
"timeseries",
"into",
"an",
"Int64Index",
"of",
"nanoseconds",
"since",
"the",
"epoch",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L371-L389 |
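A hypothetical call to the private helper; epoch-based inputs make the expected output easy to eyeball:

```python
# Assumes zipline is installed; _dt_to_epoch_ns is a private helper, so the
# import is for illustration only.
import pandas as pd
from zipline.assets.asset_writer import _dt_to_epoch_ns

s = pd.Series(pd.to_datetime(['1970-01-01', '1970-01-01 00:00:01']))
print(_dt_to_epoch_ns(s))  # expected: [0, 1000000000]
```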
26,038 | quantopian/zipline | zipline/assets/asset_writer.py | check_version_info | def check_version_info(conn, version_table, expected_version):
"""
Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION.
"""
# Read the version out of the table
version_from_table = conn.execute(
sa.select((version_table.c.version,)),
).scalar()
# A db without a version is considered v0
if version_from_table is None:
version_from_table = 0
# Raise an error if the versions do not match
if (version_from_table != expected_version):
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version) | python | def check_version_info(conn, version_table, expected_version):
"""
Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION.
"""
# Read the version out of the table
version_from_table = conn.execute(
sa.select((version_table.c.version,)),
).scalar()
# A db without a version is considered v0
if version_from_table is None:
version_from_table = 0
# Raise an error if the versions do not match
if (version_from_table != expected_version):
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version) | [
"def",
"check_version_info",
"(",
"conn",
",",
"version_table",
",",
"expected_version",
")",
":",
"# Read the version out of the table",
"version_from_table",
"=",
"conn",
".",
"execute",
"(",
"sa",
".",
"select",
"(",
"(",
"version_table",
".",
"c",
".",
"version",
",",
")",
")",
",",
")",
".",
"scalar",
"(",
")",
"# A db without a version is considered v0",
"if",
"version_from_table",
"is",
"None",
":",
"version_from_table",
"=",
"0",
"# Raise an error if the versions do not match",
"if",
"(",
"version_from_table",
"!=",
"expected_version",
")",
":",
"raise",
"AssetDBVersionError",
"(",
"db_version",
"=",
"version_from_table",
",",
"expected_version",
"=",
"expected_version",
")"
] | Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION. | [
"Checks",
"for",
"a",
"version",
"value",
"in",
"the",
"version",
"table",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L392-L423 |
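A standalone sketch of the check against an in-memory SQLite database; the single-column table layout is inferred from the `select` above, and the SQLAlchemy 1.x call style matches the row:

```python
import sqlalchemy as sa

metadata = sa.MetaData()
version_info = sa.Table('version_info', metadata,
                        sa.Column('version', sa.Integer))
engine = sa.create_engine('sqlite://')
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(version_info.insert().values(version=7))
    found = conn.execute(sa.select((version_info.c.version,))).scalar()
    print(found if found is not None else 0)  # 7; a missing row means v0
```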
26,039 | quantopian/zipline | zipline/assets/asset_writer.py | write_version_info | def write_version_info(conn, version_table, version_value):
"""
Inserts the version value into the version table.
Parameters
----------
conn : sa.Connection
The connection to use to execute the insert.
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write into the database
"""
conn.execute(sa.insert(version_table, values={'version': version_value})) | python | def write_version_info(conn, version_table, version_value):
"""
Inserts the version value into the version table.
Parameters
----------
conn : sa.Connection
The connection to use to execute the insert.
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write into the database
"""
conn.execute(sa.insert(version_table, values={'version': version_value})) | [
"def",
"write_version_info",
"(",
"conn",
",",
"version_table",
",",
"version_value",
")",
":",
"conn",
".",
"execute",
"(",
"sa",
".",
"insert",
"(",
"version_table",
",",
"values",
"=",
"{",
"'version'",
":",
"version_value",
"}",
")",
")"
] | Inserts the version value into the version table.
Parameters
----------
conn : sa.Connection
The connection to use to execute the insert.
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write into the database | [
"Inserts",
"the",
"version",
"value",
"in",
"to",
"the",
"version",
"table",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L426-L440 |
26,040 | quantopian/zipline | zipline/assets/asset_writer.py | AssetDBWriter.write_direct | def write_direct(self,
equities=None,
equity_symbol_mappings=None,
equity_supplementary_mappings=None,
futures=None,
exchanges=None,
root_symbols=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database in the format that it is
stored in the assets db.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The number of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more or fewer bind params you may
want to pass that value here.
"""
if equities is not None:
equities = _generate_output_dataframe(
equities,
_direct_equities_defaults,
)
if equity_symbol_mappings is None:
raise ValueError(
'equities provided with no symbol mapping data',
)
equity_symbol_mappings = _generate_output_dataframe(
equity_symbol_mappings,
_equity_symbol_mappings_defaults,
)
_check_symbol_mappings(
equity_symbol_mappings,
exchanges,
equities['exchange'],
)
if equity_supplementary_mappings is not None:
equity_supplementary_mappings = _generate_output_dataframe(
equity_supplementary_mappings,
_equity_supplementary_mappings_defaults,
)
if futures is not None:
futures = _generate_output_dataframe(futures, _futures_defaults)
if exchanges is not None:
exchanges = _generate_output_dataframe(
exchanges.set_index('exchange'),
_exchanges_defaults,
)
if root_symbols is not None:
root_symbols = _generate_output_dataframe(
root_symbols,
_root_symbols_defaults,
)
# Set named identifier columns as indices, if provided.
_normalize_index_columns_in_place(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
)
self._real_write(
equities=equities,
equity_symbol_mappings=equity_symbol_mappings,
equity_supplementary_mappings=equity_supplementary_mappings,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
chunk_size=chunk_size,
) | python | def write_direct(self,
equities=None,
equity_symbol_mappings=None,
equity_supplementary_mappings=None,
futures=None,
exchanges=None,
root_symbols=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database in the format that it is
stored in the assets db.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The number of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more or fewer bind params you may
want to pass that value here.
"""
if equities is not None:
equities = _generate_output_dataframe(
equities,
_direct_equities_defaults,
)
if equity_symbol_mappings is None:
raise ValueError(
'equities provided with no symbol mapping data',
)
equity_symbol_mappings = _generate_output_dataframe(
equity_symbol_mappings,
_equity_symbol_mappings_defaults,
)
_check_symbol_mappings(
equity_symbol_mappings,
exchanges,
equities['exchange'],
)
if equity_supplementary_mappings is not None:
equity_supplementary_mappings = _generate_output_dataframe(
equity_supplementary_mappings,
_equity_supplementary_mappings_defaults,
)
if futures is not None:
futures = _generate_output_dataframe(futures, _futures_defaults)
if exchanges is not None:
exchanges = _generate_output_dataframe(
exchanges.set_index('exchange'),
_exchanges_defaults,
)
if root_symbols is not None:
root_symbols = _generate_output_dataframe(
root_symbols,
_root_symbols_defaults,
)
# Set named identifier columns as indices, if provided.
_normalize_index_columns_in_place(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
)
self._real_write(
equities=equities,
equity_symbol_mappings=equity_symbol_mappings,
equity_supplementary_mappings=equity_supplementary_mappings,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
chunk_size=chunk_size,
) | [
"def",
"write_direct",
"(",
"self",
",",
"equities",
"=",
"None",
",",
"equity_symbol_mappings",
"=",
"None",
",",
"equity_supplementary_mappings",
"=",
"None",
",",
"futures",
"=",
"None",
",",
"exchanges",
"=",
"None",
",",
"root_symbols",
"=",
"None",
",",
"chunk_size",
"=",
"DEFAULT_CHUNK_SIZE",
")",
":",
"if",
"equities",
"is",
"not",
"None",
":",
"equities",
"=",
"_generate_output_dataframe",
"(",
"equities",
",",
"_direct_equities_defaults",
",",
")",
"if",
"equity_symbol_mappings",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'equities provided with no symbol mapping data'",
",",
")",
"equity_symbol_mappings",
"=",
"_generate_output_dataframe",
"(",
"equity_symbol_mappings",
",",
"_equity_symbol_mappings_defaults",
",",
")",
"_check_symbol_mappings",
"(",
"equity_symbol_mappings",
",",
"exchanges",
",",
"equities",
"[",
"'exchange'",
"]",
",",
")",
"if",
"equity_supplementary_mappings",
"is",
"not",
"None",
":",
"equity_supplementary_mappings",
"=",
"_generate_output_dataframe",
"(",
"equity_supplementary_mappings",
",",
"_equity_supplementary_mappings_defaults",
",",
")",
"if",
"futures",
"is",
"not",
"None",
":",
"futures",
"=",
"_generate_output_dataframe",
"(",
"_futures_defaults",
",",
"futures",
")",
"if",
"exchanges",
"is",
"not",
"None",
":",
"exchanges",
"=",
"_generate_output_dataframe",
"(",
"exchanges",
".",
"set_index",
"(",
"'exchange'",
")",
",",
"_exchanges_defaults",
",",
")",
"if",
"root_symbols",
"is",
"not",
"None",
":",
"root_symbols",
"=",
"_generate_output_dataframe",
"(",
"root_symbols",
",",
"_root_symbols_defaults",
",",
")",
"# Set named identifier columns as indices, if provided.",
"_normalize_index_columns_in_place",
"(",
"equities",
"=",
"equities",
",",
"equity_supplementary_mappings",
"=",
"equity_supplementary_mappings",
",",
"futures",
"=",
"futures",
",",
"exchanges",
"=",
"exchanges",
",",
"root_symbols",
"=",
"root_symbols",
",",
")",
"self",
".",
"_real_write",
"(",
"equities",
"=",
"equities",
",",
"equity_symbol_mappings",
"=",
"equity_symbol_mappings",
",",
"equity_supplementary_mappings",
"=",
"equity_supplementary_mappings",
",",
"futures",
"=",
"futures",
",",
"exchanges",
"=",
"exchanges",
",",
"root_symbols",
"=",
"root_symbols",
",",
"chunk_size",
"=",
"chunk_size",
",",
")"
] | Write asset metadata to a sqlite database in the format that it is
stored in the assets db.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The number of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more or fewer bind params you may
want to pass that value here. | [
"Write",
"asset",
"metadata",
"to",
"a",
"sqlite",
"database",
"in",
"the",
"format",
"that",
"it",
"is",
"stored",
"in",
"the",
"assets",
"db",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L514-L668 |
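A heavily hedged sketch of the guard documented above: equities provided without symbol-mapping data are rejected. The exact column requirements come from the module's `*_defaults` dicts, so the frame below is illustrative only, and the import path is an assumption:

```python
import pandas as pd
import sqlalchemy as sa
from zipline.assets import AssetDBWriter  # assumed public import path

writer = AssetDBWriter(sa.create_engine('sqlite://'))
writer.init_db()
try:
    writer.write_direct(equities=pd.DataFrame({'exchange': ['NYSE']}))
except ValueError as exc:
    print(exc)  # 'equities provided with no symbol mapping data'
```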
26,041 | quantopian/zipline | zipline/assets/asset_writer.py | AssetDBWriter.write | def write(self,
equities=None,
futures=None,
exchanges=None,
root_symbols=None,
equity_supplementary_mappings=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The number of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more or fewer bind params you may
want to pass that value here.
See Also
--------
zipline.assets.asset_finder
"""
if exchanges is None:
exchange_names = [
df['exchange']
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame({
'exchange': pd.concat(exchange_names).unique(),
})
data = self._load_data(
equities if equities is not None else pd.DataFrame(),
futures if futures is not None else pd.DataFrame(),
exchanges if exchanges is not None else pd.DataFrame(),
root_symbols if root_symbols is not None else pd.DataFrame(),
(
equity_supplementary_mappings
if equity_supplementary_mappings is not None
else pd.DataFrame()
),
)
self._real_write(
equities=data.equities,
equity_symbol_mappings=data.equities_mappings,
equity_supplementary_mappings=data.equity_supplementary_mappings,
futures=data.futures,
root_symbols=data.root_symbols,
exchanges=data.exchanges,
chunk_size=chunk_size,
) | python | def write(self,
equities=None,
futures=None,
exchanges=None,
root_symbols=None,
equity_supplementary_mappings=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The number of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more or fewer bind params you may
want to pass that value here.
See Also
--------
zipline.assets.asset_finder
"""
if exchanges is None:
exchange_names = [
df['exchange']
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame({
'exchange': pd.concat(exchange_names).unique(),
})
data = self._load_data(
equities if equities is not None else pd.DataFrame(),
futures if futures is not None else pd.DataFrame(),
exchanges if exchanges is not None else pd.DataFrame(),
root_symbols if root_symbols is not None else pd.DataFrame(),
(
equity_supplementary_mappings
if equity_supplementary_mappings is not None
else pd.DataFrame()
),
)
self._real_write(
equities=data.equities,
equity_symbol_mappings=data.equities_mappings,
equity_supplementary_mappings=data.equity_supplementary_mappings,
futures=data.futures,
root_symbols=data.root_symbols,
exchanges=data.exchanges,
chunk_size=chunk_size,
) | [
"def",
"write",
"(",
"self",
",",
"equities",
"=",
"None",
",",
"futures",
"=",
"None",
",",
"exchanges",
"=",
"None",
",",
"root_symbols",
"=",
"None",
",",
"equity_supplementary_mappings",
"=",
"None",
",",
"chunk_size",
"=",
"DEFAULT_CHUNK_SIZE",
")",
":",
"if",
"exchanges",
"is",
"None",
":",
"exchange_names",
"=",
"[",
"df",
"[",
"'exchange'",
"]",
"for",
"df",
"in",
"(",
"equities",
",",
"futures",
",",
"root_symbols",
")",
"if",
"df",
"is",
"not",
"None",
"]",
"if",
"exchange_names",
":",
"exchanges",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'exchange'",
":",
"pd",
".",
"concat",
"(",
"exchange_names",
")",
".",
"unique",
"(",
")",
",",
"}",
")",
"data",
"=",
"self",
".",
"_load_data",
"(",
"equities",
"if",
"equities",
"is",
"not",
"None",
"else",
"pd",
".",
"DataFrame",
"(",
")",
",",
"futures",
"if",
"futures",
"is",
"not",
"None",
"else",
"pd",
".",
"DataFrame",
"(",
")",
",",
"exchanges",
"if",
"exchanges",
"is",
"not",
"None",
"else",
"pd",
".",
"DataFrame",
"(",
")",
",",
"root_symbols",
"if",
"root_symbols",
"is",
"not",
"None",
"else",
"pd",
".",
"DataFrame",
"(",
")",
",",
"(",
"equity_supplementary_mappings",
"if",
"equity_supplementary_mappings",
"is",
"not",
"None",
"else",
"pd",
".",
"DataFrame",
"(",
")",
")",
",",
")",
"self",
".",
"_real_write",
"(",
"equities",
"=",
"data",
".",
"equities",
",",
"equity_symbol_mappings",
"=",
"data",
".",
"equities_mappings",
",",
"equity_supplementary_mappings",
"=",
"data",
".",
"equity_supplementary_mappings",
",",
"futures",
"=",
"data",
".",
"futures",
",",
"root_symbols",
"=",
"data",
".",
"root_symbols",
",",
"exchanges",
"=",
"data",
".",
"exchanges",
",",
"chunk_size",
"=",
"chunk_size",
",",
")"
] | Write asset metadata to a sqlite database.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The number of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
If you have compiled sqlite3 with more or fewer bind params you may
want to pass that value here.
See Also
--------
zipline.assets.asset_finder | [
"Write",
"asset",
"metadata",
"to",
"a",
"sqlite",
"database",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L670-L797 |
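A sketch of the higher-level entry point; note how `exchanges` is derived from the equities frame when omitted, per the first branch of the method. Treat the column details below as assumptions drawn from the docstring:

```python
import pandas as pd
import sqlalchemy as sa
from zipline.assets import AssetDBWriter  # assumed public import path

equities = pd.DataFrame(
    {
        'symbol': ['AAPL'],
        'asset_name': ['Apple Inc.'],
        'start_date': [pd.Timestamp('2004-01-02')],
        'end_date': [pd.Timestamp('2008-01-02')],
        'exchange': ['NASDAQ'],
    },
    index=[0],  # the sid
)
writer = AssetDBWriter(sa.create_engine('sqlite://'))
writer.write(equities=equities)  # exchanges inferred as ['NASDAQ']
```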
26,042 | quantopian/zipline | zipline/assets/asset_writer.py | AssetDBWriter._all_tables_present | def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False | python | def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False | [
"def",
"_all_tables_present",
"(",
"self",
",",
"txn",
")",
":",
"conn",
"=",
"txn",
".",
"connect",
"(",
")",
"for",
"table_name",
"in",
"asset_db_table_names",
":",
"if",
"txn",
".",
"dialect",
".",
"has_table",
"(",
"conn",
",",
"table_name",
")",
":",
"return",
"True",
"return",
"False"
] | Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False. | [
"Checks",
"if",
"any",
"tables",
"are",
"present",
"in",
"the",
"current",
"assets",
"database",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L856-L874 |
26,043 | quantopian/zipline | zipline/assets/asset_writer.py | AssetDBWriter.init_db | def init_db(self, txn=None):
"""Connect to database and create tables.
Parameters
----------
txn : sa.engine.Connection, optional
The transaction to execute in. If this is not provided, a new
transaction will be started with the engine provided.
Returns
-------
metadata : sa.MetaData
The metadata that describes the new assets db.
"""
with ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self.engine.begin())
tables_already_exist = self._all_tables_present(txn)
# Create the SQL tables if they do not already exist.
metadata.create_all(txn, checkfirst=True)
if tables_already_exist:
check_version_info(txn, version_info, ASSET_DB_VERSION)
else:
write_version_info(txn, version_info, ASSET_DB_VERSION) | python | def init_db(self, txn=None):
"""Connect to database and create tables.
Parameters
----------
txn : sa.engine.Connection, optional
The transaction to execute in. If this is not provided, a new
transaction will be started with the engine provided.
Returns
-------
metadata : sa.MetaData
The metadata that describes the new assets db.
"""
with ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self.engine.begin())
tables_already_exist = self._all_tables_present(txn)
# Create the SQL tables if they do not already exist.
metadata.create_all(txn, checkfirst=True)
if tables_already_exist:
check_version_info(txn, version_info, ASSET_DB_VERSION)
else:
write_version_info(txn, version_info, ASSET_DB_VERSION) | [
"def",
"init_db",
"(",
"self",
",",
"txn",
"=",
"None",
")",
":",
"with",
"ExitStack",
"(",
")",
"as",
"stack",
":",
"if",
"txn",
"is",
"None",
":",
"txn",
"=",
"stack",
".",
"enter_context",
"(",
"self",
".",
"engine",
".",
"begin",
"(",
")",
")",
"tables_already_exist",
"=",
"self",
".",
"_all_tables_present",
"(",
"txn",
")",
"# Create the SQL tables if they do not already exist.",
"metadata",
".",
"create_all",
"(",
"txn",
",",
"checkfirst",
"=",
"True",
")",
"if",
"tables_already_exist",
":",
"check_version_info",
"(",
"txn",
",",
"version_info",
",",
"ASSET_DB_VERSION",
")",
"else",
":",
"write_version_info",
"(",
"txn",
",",
"version_info",
",",
"ASSET_DB_VERSION",
")"
] | Connect to database and create tables.
Parameters
----------
txn : sa.engine.Connection, optional
The transaction to execute in. If this is not provided, a new
transaction will be started with the engine provided.
Returns
-------
metadata : sa.MetaData
The metadata that describes the new assets db. | [
"Connect",
"to",
"database",
"and",
"create",
"tables",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_writer.py#L876-L902 |
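The create-vs-check branch above makes initialization idempotent; a small sketch, assuming the same `AssetDBWriter` import as earlier:

```python
import sqlalchemy as sa
from zipline.assets import AssetDBWriter

engine = sa.create_engine('sqlite://')
AssetDBWriter(engine).init_db()  # fresh db: creates tables, stamps version
AssetDBWriter(engine).init_db()  # existing db: validates the stamp instead
```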
26,044 | quantopian/zipline | zipline/pipeline/loaders/blaze/utils.py | load_raw_data | def load_raw_data(assets,
data_query_cutoff_times,
expr,
odo_kwargs,
checkpoints=None):
"""
Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
assets : pd.Int64Index
The assets to load data for.
data_query_cutoff_times : pd.DatetimeIndex
The datetime when data should no longer be considered available for
a session.
expr : Expr
The expression representing the data to load.
odo_kwargs : dict
Extra keyword arguments to pass to odo when executing the expression.
checkpoints : Expr, optional
The expression representing the checkpointed data for `expr`.
Returns
-------
raw : pd.DataFrame
The result of computing `expr` and materializing the result as a
DataFrame.
"""
lower_dt, upper_dt = data_query_cutoff_times[[0, -1]]
raw = ffill_query_in_range(
expr,
lower_dt,
upper_dt,
checkpoints=checkpoints,
odo_kwargs=odo_kwargs,
)
sids = raw[SID_FIELD_NAME]
raw.drop(
sids[~sids.isin(assets)].index,
inplace=True
)
return raw | python | def load_raw_data(assets,
data_query_cutoff_times,
expr,
odo_kwargs,
checkpoints=None):
"""
Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
assets : pd.Int64Index
The assets to load data for.
data_query_cutoff_times : pd.DatetimeIndex
The datetime when data should no longer be considered available for
a session.
expr : Expr
The expression representing the data to load.
odo_kwargs : dict
Extra keyword arguments to pass to odo when executing the expression.
checkpoints : Expr, optional
The expression representing the checkpointed data for `expr`.
Returns
-------
raw : pd.DataFrame
The result of computing `expr` and materializing the result as a
DataFrame.
"""
lower_dt, upper_dt = data_query_cutoff_times[[0, -1]]
raw = ffill_query_in_range(
expr,
lower_dt,
upper_dt,
checkpoints=checkpoints,
odo_kwargs=odo_kwargs,
)
sids = raw[SID_FIELD_NAME]
raw.drop(
sids[~sids.isin(assets)].index,
inplace=True
)
return raw | [
"def",
"load_raw_data",
"(",
"assets",
",",
"data_query_cutoff_times",
",",
"expr",
",",
"odo_kwargs",
",",
"checkpoints",
"=",
"None",
")",
":",
"lower_dt",
",",
"upper_dt",
"=",
"data_query_cutoff_times",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"raw",
"=",
"ffill_query_in_range",
"(",
"expr",
",",
"lower_dt",
",",
"upper_dt",
",",
"checkpoints",
"=",
"checkpoints",
",",
"odo_kwargs",
"=",
"odo_kwargs",
",",
")",
"sids",
"=",
"raw",
"[",
"SID_FIELD_NAME",
"]",
"raw",
".",
"drop",
"(",
"sids",
"[",
"~",
"sids",
".",
"isin",
"(",
"assets",
")",
"]",
".",
"index",
",",
"inplace",
"=",
"True",
")",
"return",
"raw"
] | Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
assets : pd.Int64Index
The assets to load data for.
data_query_cutoff_times : pd.DatetimeIndex
The datetime when data should no longer be considered available for
a session.
expr : Expr
The expression representing the data to load.
odo_kwargs : dict
Extra keyword arguments to pass to odo when executing the expression.
checkpoints : Expr, optional
The expression representing the checkpointed data for `expr`.
Returns
-------
raw : pd.DataFrame
The result of computing `expr` and materializing the result as a
DataFrame. | [
"Given",
"an",
"expression",
"representing",
"data",
"to",
"load",
"perform",
"normalization",
"and",
"forward",
"-",
"filling",
"and",
"return",
"the",
"data",
"materialized",
".",
"Only",
"accepts",
"data",
"with",
"a",
"sid",
"field",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/utils.py#L5-L48 |
26,045 | quantopian/zipline | zipline/utils/range.py | from_tuple | def from_tuple(tup):
"""Convert a tuple into a range with error handling.
Parameters
----------
tup : tuple (len 2 or 3)
The tuple to turn into a range.
Returns
-------
range : range
The range from the tuple.
Raises
------
ValueError
Raised when the tuple length is not 2 or 3.
"""
if len(tup) not in (2, 3):
raise ValueError(
'tuple must contain 2 or 3 elements, not: %d (%r)' % (
len(tup),
tup,
),
)
return range(*tup) | python | def from_tuple(tup):
"""Convert a tuple into a range with error handling.
Parameters
----------
tup : tuple (len 2 or 3)
The tuple to turn into a range.
Returns
-------
range : range
The range from the tuple.
Raises
------
ValueError
Raised when the tuple length is not 2 or 3.
"""
if len(tup) not in (2, 3):
raise ValueError(
'tuple must contain 2 or 3 elements, not: %d (%r)' % (
len(tup),
tup,
),
)
return range(*tup) | [
"def",
"from_tuple",
"(",
"tup",
")",
":",
"if",
"len",
"(",
"tup",
")",
"not",
"in",
"(",
"2",
",",
"3",
")",
":",
"raise",
"ValueError",
"(",
"'tuple must contain 2 or 3 elements, not: %d (%r'",
"%",
"(",
"len",
"(",
"tup",
")",
",",
"tup",
",",
")",
",",
")",
"return",
"range",
"(",
"*",
"tup",
")"
] | Convert a tuple into a range with error handling.
Parameters
----------
tup : tuple (len 2 or 3)
The tuple to turn into a range.
Returns
-------
range : range
The range from the tuple.
Raises
------
ValueError
Raised when the tuple length is not 2 or 3. | [
"Convert",
"a",
"tuple",
"into",
"a",
"range",
"with",
"error",
"handling",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L151-L176 |
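Usage is direct; the import path follows the row's URL:

```python
from zipline.utils.range import from_tuple

print(from_tuple((2, 10)))     # range(2, 10)
print(from_tuple((2, 10, 2)))  # range(2, 10, 2)
from_tuple((2,))               # ValueError: tuple must contain 2 or 3 elements
```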
26,046 | quantopian/zipline | zipline/utils/range.py | maybe_from_tuple | def maybe_from_tuple(tup_or_range):
"""Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input as a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
"""
if isinstance(tup_or_range, tuple):
return from_tuple(tup_or_range)
elif isinstance(tup_or_range, range):
return tup_or_range
raise ValueError(
'maybe_from_tuple expects a tuple or range, got %r: %r' % (
type(tup_or_range).__name__,
tup_or_range,
),
) | python | def maybe_from_tuple(tup_or_range):
"""Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input as a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
"""
if isinstance(tup_or_range, tuple):
return from_tuple(tup_or_range)
elif isinstance(tup_or_range, range):
return tup_or_range
raise ValueError(
'maybe_from_tuple expects a tuple or range, got %r: %r' % (
type(tup_or_range).__name__,
tup_or_range,
),
) | [
"def",
"maybe_from_tuple",
"(",
"tup_or_range",
")",
":",
"if",
"isinstance",
"(",
"tup_or_range",
",",
"tuple",
")",
":",
"return",
"from_tuple",
"(",
"tup_or_range",
")",
"elif",
"isinstance",
"(",
"tup_or_range",
",",
"range",
")",
":",
"return",
"tup_or_range",
"raise",
"ValueError",
"(",
"'maybe_from_tuple expects a tuple or range, got %r: %r'",
"%",
"(",
"type",
"(",
"tup_or_range",
")",
".",
"__name__",
",",
"tup_or_range",
",",
")",
",",
")"
] | Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input as a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3. | [
"Convert",
"a",
"tuple",
"into",
"a",
"range",
"but",
"pass",
"ranges",
"through",
"silently",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L179-L212 |
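A short demonstration of the pass-through behavior described above:

```python
from zipline.utils.range import maybe_from_tuple

print(maybe_from_tuple((0, 5)))  # converted: range(0, 5)
r = range(0, 5)
print(maybe_from_tuple(r) is r)  # True -- ranges pass through untouched
maybe_from_tuple([0, 5])         # ValueError: neither tuple nor range
```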
26,047 | quantopian/zipline | zipline/utils/range.py | _check_steps | def _check_steps(a, b):
"""Check that the steps of ``a`` and ``b`` are both 1.
Parameters
----------
a : range
The first range to check.
b : range
The second range to check.
Raises
------
ValueError
Raised when either step is not 1.
"""
if a.step != 1:
raise ValueError('a.step must be equal to 1, got: %s' % a.step)
if b.step != 1:
raise ValueError('b.step must be equal to 1, got: %s' % b.step) | python | def _check_steps(a, b):
"""Check that the steps of ``a`` and ``b`` are both 1.
Parameters
----------
a : range
The first range to check.
b : range
The second range to check.
Raises
------
ValueError
Raised when either step is not 1.
"""
if a.step != 1:
raise ValueError('a.step must be equal to 1, got: %s' % a.step)
if b.step != 1:
raise ValueError('b.step must be equal to 1, got: %s' % b.step) | [
"def",
"_check_steps",
"(",
"a",
",",
"b",
")",
":",
"if",
"a",
".",
"step",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'a.step must be equal to 1, got: %s'",
"%",
"a",
".",
"step",
")",
"if",
"b",
".",
"step",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'b.step must be equal to 1, got: %s'",
"%",
"b",
".",
"step",
")"
] | Check that the steps of ``a`` and ``b`` are both 1.
Parameters
----------
a : range
The first range to check.
b : range
The second range to check.
Raises
------
ValueError
Raised when either step is not 1. | [
"Check",
"that",
"the",
"steps",
"of",
"a",
"and",
"b",
"are",
"both",
"1",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L215-L233 |
26,048 | quantopian/zipline | zipline/utils/range.py | overlap | def overlap(a, b):
"""Check if two ranges overlap.
Parameters
----------
a : range
The first range.
b : range
The second range.
Returns
-------
overlaps : bool
Do these ranges overlap.
Notes
-----
This function does not support ranges with step != 1.
"""
_check_steps(a, b)
return a.stop >= b.start and b.stop >= a.start | python | def overlap(a, b):
"""Check if two ranges overlap.
Parameters
----------
a : range
The first range.
b : range
The second range.
Returns
-------
overlaps : bool
Do these ranges overlap.
Notes
-----
This function does not support ranges with step != 1.
"""
_check_steps(a, b)
return a.stop >= b.start and b.stop >= a.start | [
"def",
"overlap",
"(",
"a",
",",
"b",
")",
":",
"_check_steps",
"(",
"a",
",",
"b",
")",
"return",
"a",
".",
"stop",
">=",
"b",
".",
"start",
"and",
"b",
".",
"stop",
">=",
"a",
".",
"start"
] | Check if two ranges overlap.
Parameters
----------
a : range
The first range.
b : range
The second range.
Returns
-------
overlaps : bool
Do these ranges overlap.
Notes
-----
This function does not support ranges with step != 1. | [
"Check",
"if",
"two",
"ranges",
"overlap",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L236-L256 |
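The >= comparisons give closed-interval semantics, so abutting ranges count as overlapping; the intersecting_ranges doctests later in this file depend on exactly that. A quick sketch:
from zipline.utils.range import overlap
print(overlap(range(0, 5), range(3, 9)))   # True: genuine intersection
print(overlap(range(0, 5), range(5, 9)))   # True: a.stop == b.start counts
print(overlap(range(0, 4), range(5, 9)))   # False: disjoint with a gap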
26,049 | quantopian/zipline | zipline/utils/range.py | merge | def merge(a, b):
"""Merge two ranges with step == 1.
Parameters
----------
a : range
The first range.
b : range
The second range.
"""
_check_steps(a, b)
return range(min(a.start, b.start), max(a.stop, b.stop)) | python | def merge(a, b):
"""Merge two ranges with step == 1.
Parameters
----------
a : range
The first range.
b : range
The second range.
"""
_check_steps(a, b)
return range(min(a.start, b.start), max(a.stop, b.stop)) | [
"def",
"merge",
"(",
"a",
",",
"b",
")",
":",
"_check_steps",
"(",
"a",
",",
"b",
")",
"return",
"range",
"(",
"min",
"(",
"a",
".",
"start",
",",
"b",
".",
"start",
")",
",",
"max",
"(",
"a",
".",
"stop",
",",
"b",
".",
"stop",
")",
")"
] | Merge two ranges with step == 1.
Parameters
----------
a : range
The first range.
b : range
The second range. | [
"Merge",
"two",
"ranges",
"with",
"step",
"==",
"1",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L259-L270 |
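Note that merge returns the convex hull of its inputs, so any gap between them is silently absorbed; callers are expected to gate it with overlap() first, as this sketch shows:
from zipline.utils.range import merge
print(merge(range(0, 3), range(2, 7)))   # range(0, 7)
print(merge(range(0, 1), range(5, 9)))   # range(0, 9): the gap vanishes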
26,050 | quantopian/zipline | zipline/utils/range.py | _combine | def _combine(n, rs):
"""helper for ``_group_ranges``
"""
try:
r, rs = peek(rs)
except StopIteration:
yield n
return
if overlap(n, r):
yield merge(n, r)
next(rs)
for r in rs:
yield r
else:
yield n
for r in rs:
yield r | python | def _combine(n, rs):
"""helper for ``_group_ranges``
"""
try:
r, rs = peek(rs)
except StopIteration:
yield n
return
if overlap(n, r):
yield merge(n, r)
next(rs)
for r in rs:
yield r
else:
yield n
for r in rs:
yield r | [
"def",
"_combine",
"(",
"n",
",",
"rs",
")",
":",
"try",
":",
"r",
",",
"rs",
"=",
"peek",
"(",
"rs",
")",
"except",
"StopIteration",
":",
"yield",
"n",
"return",
"if",
"overlap",
"(",
"n",
",",
"r",
")",
":",
"yield",
"merge",
"(",
"n",
",",
"r",
")",
"next",
"(",
"rs",
")",
"for",
"r",
"in",
"rs",
":",
"yield",
"r",
"else",
":",
"yield",
"n",
"for",
"r",
"in",
"rs",
":",
"yield",
"r"
] | helper for ``_group_ranges`` | [
"helper",
"for",
"_group_ranges"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L273-L290 |
26,051 | quantopian/zipline | zipline/utils/range.py | intersecting_ranges | def intersecting_ranges(ranges):
"""Return any ranges that intersect.
Parameters
----------
ranges : iterable[ranges]
A sequence of ranges to check for intersections.
Returns
-------
intersections : iterable[ranges]
A sequence of all of the ranges that intersected in ``ranges``.
Examples
--------
>>> ranges = [range(0, 1), range(2, 5), range(4, 7)]
>>> list(intersecting_ranges(ranges))
[range(2, 5), range(4, 7)]
>>> ranges = [range(0, 1), range(2, 3)]
>>> list(intersecting_ranges(ranges))
[]
>>> ranges = [range(0, 1), range(1, 2)]
>>> list(intersecting_ranges(ranges))
[range(0, 1), range(1, 2)]
"""
ranges = sorted(ranges, key=op.attrgetter('start'))
return sorted_diff(ranges, group_ranges(ranges)) | python | def intersecting_ranges(ranges):
"""Return any ranges that intersect.
Parameters
----------
ranges : iterable[ranges]
A sequence of ranges to check for intersections.
Returns
-------
intersections : iterable[ranges]
A sequence of all of the ranges that intersected in ``ranges``.
Examples
--------
>>> ranges = [range(0, 1), range(2, 5), range(4, 7)]
>>> list(intersecting_ranges(ranges))
[range(2, 5), range(4, 7)]
>>> ranges = [range(0, 1), range(2, 3)]
>>> list(intersecting_ranges(ranges))
[]
>>> ranges = [range(0, 1), range(1, 2)]
>>> list(intersecting_ranges(ranges))
[range(0, 1), range(1, 2)]
"""
ranges = sorted(ranges, key=op.attrgetter('start'))
return sorted_diff(ranges, group_ranges(ranges)) | [
"def",
"intersecting_ranges",
"(",
"ranges",
")",
":",
"ranges",
"=",
"sorted",
"(",
"ranges",
",",
"key",
"=",
"op",
".",
"attrgetter",
"(",
"'start'",
")",
")",
"return",
"sorted_diff",
"(",
"ranges",
",",
"group_ranges",
"(",
"ranges",
")",
")"
] | Return any ranges that intersect.
Parameters
----------
ranges : iterable[ranges]
A sequence of ranges to check for intersections.
Returns
-------
intersections : iterable[ranges]
A sequence of all of the ranges that intersected in ``ranges``.
Examples
--------
>>> ranges = [range(0, 1), range(2, 5), range(4, 7)]
>>> list(intersecting_ranges(ranges))
[range(2, 5), range(4, 7)]
>>> ranges = [range(0, 1), range(2, 3)]
>>> list(intersecting_ranges(ranges))
[]
>>> ranges = [range(0, 1), range(1, 2)]
>>> list(intersecting_ranges(ranges))
[range(0, 1), range(1, 2)] | [
"Return",
"any",
"ranges",
"that",
"intersect",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/range.py#L336-L364 |
26,052 | quantopian/zipline | zipline/data/loader.py | get_data_filepath | def get_data_filepath(name, environ=None):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root(environ)
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name) | python | def get_data_filepath(name, environ=None):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root(environ)
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name) | [
"def",
"get_data_filepath",
"(",
"name",
",",
"environ",
"=",
"None",
")",
":",
"dr",
"=",
"data_root",
"(",
"environ",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dr",
")",
":",
"os",
".",
"makedirs",
"(",
"dr",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"dr",
",",
"name",
")"
] | Returns a handle to data file.
Creates containing directory, if needed. | [
"Returns",
"a",
"handle",
"to",
"data",
"file",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/loader.py#L52-L63 |
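A hedged usage sketch; the filename is illustrative, and data_root is assumed to resolve to ~/.zipline/data when ZIPLINE_ROOT is not set in the environment:
from zipline.data.loader import get_data_filepath
path = get_data_filepath('SPY_benchmark.csv')   # hypothetical filename
print(path)   # e.g. /home/user/.zipline/data/SPY_benchmark.csv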
26,053 | quantopian/zipline | zipline/data/loader.py | has_data_for_dates | def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date) | python | def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date) | [
"def",
"has_data_for_dates",
"(",
"series_or_df",
",",
"first_date",
",",
"last_date",
")",
":",
"dts",
"=",
"series_or_df",
".",
"index",
"if",
"not",
"isinstance",
"(",
"dts",
",",
"pd",
".",
"DatetimeIndex",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected a DatetimeIndex, but got %s.\"",
"%",
"type",
"(",
"dts",
")",
")",
"first",
",",
"last",
"=",
"dts",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"return",
"(",
"first",
"<=",
"first_date",
")",
"and",
"(",
"last",
">=",
"last_date",
")"
] | Does `series_or_df` have data on or before first_date and on or after
last_date? | [
"Does",
"series_or_df",
"have",
"data",
"on",
"or",
"before",
"first_date",
"and",
"on",
"or",
"after",
"last_date?"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/loader.py#L78-L87 |
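A self-contained sketch of the bounds check (the dates are arbitrary):
import pandas as pd
from zipline.data.loader import has_data_for_dates
series = pd.Series(0.0, index=pd.date_range('2014-01-02', '2014-12-31', tz='UTC'))
print(has_data_for_dates(series,
                         pd.Timestamp('2014-02-03', tz='UTC'),
                         pd.Timestamp('2014-11-03', tz='UTC')))   # True
print(has_data_for_dates(series,
                         pd.Timestamp('2013-12-02', tz='UTC'),
                         pd.Timestamp('2014-11-03', tz='UTC')))   # False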
26,054 | quantopian/zipline | zipline/data/loader.py | load_market_data | def load_market_data(trading_day=None, trading_days=None, bm_symbol='SPY',
environ=None):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from IEX Trading. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to 'SPY', the ticker
for the S&P 500, provided by IEX Trading.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
if trading_day is None:
trading_day = get_calendar('XNYS').day
if trading_days is None:
trading_days = get_calendar('XNYS').all_sessions
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# we will fill missing benchmark data through latest trading date
last_date = trading_days[trading_days.get_loc(now, method='ffill')]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
environ,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
environ,
)
# combine dt indices and reindex using ffill then bfill
all_dt = br.index.union(tc.index)
br = br.reindex(all_dt, method='ffill').fillna(method='bfill')
tc = tc.reindex(all_dt, method='ffill').fillna(method='bfill')
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves | python | def load_market_data(trading_day=None, trading_days=None, bm_symbol='SPY',
environ=None):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from IEX Trading. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to 'SPY', the ticker
for the S&P 500, provided by IEX Trading.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
if trading_day is None:
trading_day = get_calendar('XNYS').day
if trading_days is None:
trading_days = get_calendar('XNYS').all_sessions
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# we will fill missing benchmark data through latest trading date
last_date = trading_days[trading_days.get_loc(now, method='ffill')]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
environ,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
environ,
)
# combine dt indices and reindex using ffill then bfill
all_dt = br.index.union(tc.index)
br = br.reindex(all_dt, method='ffill').fillna(method='bfill')
tc = tc.reindex(all_dt, method='ffill').fillna(method='bfill')
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves | [
"def",
"load_market_data",
"(",
"trading_day",
"=",
"None",
",",
"trading_days",
"=",
"None",
",",
"bm_symbol",
"=",
"'SPY'",
",",
"environ",
"=",
"None",
")",
":",
"if",
"trading_day",
"is",
"None",
":",
"trading_day",
"=",
"get_calendar",
"(",
"'XNYS'",
")",
".",
"day",
"if",
"trading_days",
"is",
"None",
":",
"trading_days",
"=",
"get_calendar",
"(",
"'XNYS'",
")",
".",
"all_sessions",
"first_date",
"=",
"trading_days",
"[",
"0",
"]",
"now",
"=",
"pd",
".",
"Timestamp",
".",
"utcnow",
"(",
")",
"# we will fill missing benchmark data through latest trading date",
"last_date",
"=",
"trading_days",
"[",
"trading_days",
".",
"get_loc",
"(",
"now",
",",
"method",
"=",
"'ffill'",
")",
"]",
"br",
"=",
"ensure_benchmark_data",
"(",
"bm_symbol",
",",
"first_date",
",",
"last_date",
",",
"now",
",",
"# We need the trading_day to figure out the close prior to the first",
"# date so that we can compute returns for the first date.",
"trading_day",
",",
"environ",
",",
")",
"tc",
"=",
"ensure_treasury_data",
"(",
"bm_symbol",
",",
"first_date",
",",
"last_date",
",",
"now",
",",
"environ",
",",
")",
"# combine dt indices and reindex using ffill then bfill",
"all_dt",
"=",
"br",
".",
"index",
".",
"union",
"(",
"tc",
".",
"index",
")",
"br",
"=",
"br",
".",
"reindex",
"(",
"all_dt",
",",
"method",
"=",
"'ffill'",
")",
".",
"fillna",
"(",
"method",
"=",
"'bfill'",
")",
"tc",
"=",
"tc",
".",
"reindex",
"(",
"all_dt",
",",
"method",
"=",
"'ffill'",
")",
".",
"fillna",
"(",
"method",
"=",
"'bfill'",
")",
"benchmark_returns",
"=",
"br",
"[",
"br",
".",
"index",
".",
"slice_indexer",
"(",
"first_date",
",",
"last_date",
")",
"]",
"treasury_curves",
"=",
"tc",
"[",
"tc",
".",
"index",
".",
"slice_indexer",
"(",
"first_date",
",",
"last_date",
")",
"]",
"return",
"benchmark_returns",
",",
"treasury_curves"
] | Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from IEX Trading. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to 'SPY', the ticker
for the S&P 500, provided by IEX Trading.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year' | [
"Load",
"benchmark",
"returns",
"and",
"treasury",
"yield",
"curves",
"for",
"the",
"given",
"calendar",
"and",
"benchmark",
"symbol",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/loader.py#L90-L166 |
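A typical call, shown as a sketch rather than a test: on a cold cache this reaches out to IEX and the Federal Reserve, so network access is assumed:
from zipline.data.loader import load_market_data
benchmark_returns, treasury_curves = load_market_data(bm_symbol='SPY')
print(benchmark_returns.tail())
print(treasury_curves[['1month', '10year']].tail())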
26,055 | quantopian/zipline | zipline/data/loader.py | ensure_benchmark_data | def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day,
environ=None):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
filename = get_benchmark_filename(symbol)
data = _load_cached_data(filename, first_date, last_date, now, 'benchmark',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading benchmark data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date - trading_day,
last_date=last_date
)
try:
data = get_benchmark_returns(symbol)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('Failed to cache the new benchmark returns')
raise
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected benchmark data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date - trading_day,
last_date=last_date
)
return data | python | def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day,
environ=None):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
filename = get_benchmark_filename(symbol)
data = _load_cached_data(filename, first_date, last_date, now, 'benchmark',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading benchmark data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date - trading_day,
last_date=last_date
)
try:
data = get_benchmark_returns(symbol)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('Failed to cache the new benchmark returns')
raise
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected benchmark data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date - trading_day,
last_date=last_date
)
return data | [
"def",
"ensure_benchmark_data",
"(",
"symbol",
",",
"first_date",
",",
"last_date",
",",
"now",
",",
"trading_day",
",",
"environ",
"=",
"None",
")",
":",
"filename",
"=",
"get_benchmark_filename",
"(",
"symbol",
")",
"data",
"=",
"_load_cached_data",
"(",
"filename",
",",
"first_date",
",",
"last_date",
",",
"now",
",",
"'benchmark'",
",",
"environ",
")",
"if",
"data",
"is",
"not",
"None",
":",
"return",
"data",
"# If no cached data was found or it was missing any dates then download the",
"# necessary data.",
"logger",
".",
"info",
"(",
"(",
"'Downloading benchmark data for {symbol!r} '",
"'from {first_date} to {last_date}'",
")",
",",
"symbol",
"=",
"symbol",
",",
"first_date",
"=",
"first_date",
"-",
"trading_day",
",",
"last_date",
"=",
"last_date",
")",
"try",
":",
"data",
"=",
"get_benchmark_returns",
"(",
"symbol",
")",
"data",
".",
"to_csv",
"(",
"get_data_filepath",
"(",
"filename",
",",
"environ",
")",
")",
"except",
"(",
"OSError",
",",
"IOError",
",",
"HTTPError",
")",
":",
"logger",
".",
"exception",
"(",
"'Failed to cache the new benchmark returns'",
")",
"raise",
"if",
"not",
"has_data_for_dates",
"(",
"data",
",",
"first_date",
",",
"last_date",
")",
":",
"logger",
".",
"warn",
"(",
"(",
"\"Still don't have expected benchmark data for {symbol!r} \"",
"\"from {first_date} to {last_date} after redownload!\"",
")",
",",
"symbol",
"=",
"symbol",
",",
"first_date",
"=",
"first_date",
"-",
"trading_day",
",",
"last_date",
"=",
"last_date",
")",
"return",
"data"
] | Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path. | [
"Ensure",
"we",
"have",
"benchmark",
"data",
"for",
"symbol",
"from",
"first_date",
"to",
"last_date"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/loader.py#L169-L229 |
26,056 | quantopian/zipline | zipline/data/loader.py | ensure_treasury_data | def ensure_treasury_data(symbol, first_date, last_date, now, environ=None):
"""
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
symbol, INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())
data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading treasury data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected treasury data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
return data | python | def ensure_treasury_data(symbol, first_date, last_date, now, environ=None):
"""
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
symbol, INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())
data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading treasury data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected treasury data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
return data | [
"def",
"ensure_treasury_data",
"(",
"symbol",
",",
"first_date",
",",
"last_date",
",",
"now",
",",
"environ",
"=",
"None",
")",
":",
"loader_module",
",",
"filename",
",",
"source",
"=",
"INDEX_MAPPING",
".",
"get",
"(",
"symbol",
",",
"INDEX_MAPPING",
"[",
"'SPY'",
"]",
",",
")",
"first_date",
"=",
"max",
"(",
"first_date",
",",
"loader_module",
".",
"earliest_possible_date",
"(",
")",
")",
"data",
"=",
"_load_cached_data",
"(",
"filename",
",",
"first_date",
",",
"last_date",
",",
"now",
",",
"'treasury'",
",",
"environ",
")",
"if",
"data",
"is",
"not",
"None",
":",
"return",
"data",
"# If no cached data was found or it was missing any dates then download the",
"# necessary data.",
"logger",
".",
"info",
"(",
"(",
"'Downloading treasury data for {symbol!r} '",
"'from {first_date} to {last_date}'",
")",
",",
"symbol",
"=",
"symbol",
",",
"first_date",
"=",
"first_date",
",",
"last_date",
"=",
"last_date",
")",
"try",
":",
"data",
"=",
"loader_module",
".",
"get_treasury_data",
"(",
"first_date",
",",
"last_date",
")",
"data",
".",
"to_csv",
"(",
"get_data_filepath",
"(",
"filename",
",",
"environ",
")",
")",
"except",
"(",
"OSError",
",",
"IOError",
",",
"HTTPError",
")",
":",
"logger",
".",
"exception",
"(",
"'failed to cache treasury data'",
")",
"if",
"not",
"has_data_for_dates",
"(",
"data",
",",
"first_date",
",",
"last_date",
")",
":",
"logger",
".",
"warn",
"(",
"(",
"\"Still don't have expected treasury data for {symbol!r} \"",
"\"from {first_date} to {last_date} after redownload!\"",
")",
",",
"symbol",
"=",
"symbol",
",",
"first_date",
"=",
"first_date",
",",
"last_date",
"=",
"last_date",
")",
"return",
"data"
] | Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path. | [
"Ensure",
"we",
"have",
"treasury",
"data",
"from",
"treasury",
"module",
"associated",
"with",
"symbol",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/loader.py#L232-L292 |
26,057 | quantopian/zipline | zipline/pipeline/graph.py | maybe_specialize | def maybe_specialize(term, domain):
"""Specialize a term if it's loadable.
"""
if isinstance(term, LoadableTerm):
return term.specialize(domain)
return term | python | def maybe_specialize(term, domain):
"""Specialize a term if it's loadable.
"""
if isinstance(term, LoadableTerm):
return term.specialize(domain)
return term | [
"def",
"maybe_specialize",
"(",
"term",
",",
"domain",
")",
":",
"if",
"isinstance",
"(",
"term",
",",
"LoadableTerm",
")",
":",
"return",
"term",
".",
"specialize",
"(",
"domain",
")",
"return",
"term"
] | Specialize a term if it's loadable. | [
"Specialize",
"a",
"term",
"if",
"it",
"s",
"loadable",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L498-L503 |
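The pass-through branch is easy to demonstrate standalone; exercising the loadable branch would need a real pipeline column (e.g. USEquityPricing.close) plus a domain, which is left out of this sketch:
from zipline.pipeline.graph import maybe_specialize
sentinel = object()                          # not a LoadableTerm
assert maybe_specialize(sentinel, None) is sentinel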
26,058 | quantopian/zipline | zipline/pipeline/graph.py | TermGraph._add_to_graph | def _add_to_graph(self, term, parents):
"""
Add a term and all its children to ``graph``.
``parents`` is the set of all the parents of ``term`` that we've added
so far. It is only used to detect dependency cycles.
"""
if self._frozen:
raise ValueError(
"Can't mutate %s after construction." % type(self).__name__
)
# If we've seen this node already as a parent of the current traversal,
# it means we have an unsatisfiable dependency. This should only be
# possible if the term's inputs are mutated after construction.
if term in parents:
raise CyclicDependency(term)
parents.add(term)
self.graph.add_node(term)
for dependency in term.dependencies:
self._add_to_graph(dependency, parents)
self.graph.add_edge(dependency, term)
parents.remove(term) | python | def _add_to_graph(self, term, parents):
"""
Add a term and all its children to ``graph``.
``parents`` is the set of all the parents of ``term`` that we've added
so far. It is only used to detect dependency cycles.
"""
if self._frozen:
raise ValueError(
"Can't mutate %s after construction." % type(self).__name__
)
# If we've seen this node already as a parent of the current traversal,
# it means we have an unsatisfiable dependency. This should only be
# possible if the term's inputs are mutated after construction.
if term in parents:
raise CyclicDependency(term)
parents.add(term)
self.graph.add_node(term)
for dependency in term.dependencies:
self._add_to_graph(dependency, parents)
self.graph.add_edge(dependency, term)
parents.remove(term) | [
"def",
"_add_to_graph",
"(",
"self",
",",
"term",
",",
"parents",
")",
":",
"if",
"self",
".",
"_frozen",
":",
"raise",
"ValueError",
"(",
"\"Can't mutate %s after construction.\"",
"%",
"type",
"(",
"self",
")",
".",
"__name__",
")",
"# If we've seen this node already as a parent of the current traversal,",
"# it means we have an unsatisifiable dependency. This should only be",
"# possible if the term's inputs are mutated after construction.",
"if",
"term",
"in",
"parents",
":",
"raise",
"CyclicDependency",
"(",
"term",
")",
"parents",
".",
"add",
"(",
"term",
")",
"self",
".",
"graph",
".",
"add_node",
"(",
"term",
")",
"for",
"dependency",
"in",
"term",
".",
"dependencies",
":",
"self",
".",
"_add_to_graph",
"(",
"dependency",
",",
"parents",
")",
"self",
".",
"graph",
".",
"add_edge",
"(",
"dependency",
",",
"term",
")",
"parents",
".",
"remove",
"(",
"term",
")"
] | Add a term and all its children to ``graph``.
``parents`` is the set of all the parents of ``term`` that we've added
so far. It is only used to detect dependency cycles. | [
"Add",
"a",
"term",
"and",
"all",
"its",
"children",
"to",
"graph",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L69-L95 |
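A generic sketch of the cycle-detection pattern used above: carry the set of ancestors on the current DFS path and fail when a node reappears in it. Toy dicts stand in for Term objects and networkx here:
def add_to_graph(graph, node, deps, parents=None):
    parents = set() if parents is None else parents
    if node in parents:                          # node is its own ancestor
        raise ValueError('cyclic dependency: %r' % (node,))
    parents.add(node)
    for dep in deps.get(node, ()):
        add_to_graph(graph, dep, deps, parents)
        graph.setdefault(dep, set()).add(node)   # edge dep -> node
    parents.remove(node)

try:
    add_to_graph({}, 'a', {'a': ['b'], 'b': ['a']})   # a <-> b cycle
except ValueError as exc:
    print(exc)   # cyclic dependency: 'a'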
26,059 | quantopian/zipline | zipline/pipeline/graph.py | TermGraph.execution_order | def execution_order(self, refcounts):
"""
Return a topologically-sorted iterator over the terms in ``self`` which
need to be computed.
"""
return iter(nx.topological_sort(
self.graph.subgraph(
{term for term, refcount in refcounts.items() if refcount > 0},
),
)) | python | def execution_order(self, refcounts):
"""
Return a topologically-sorted iterator over the terms in ``self`` which
need to be computed.
"""
return iter(nx.topological_sort(
self.graph.subgraph(
{term for term, refcount in refcounts.items() if refcount > 0},
),
)) | [
"def",
"execution_order",
"(",
"self",
",",
"refcounts",
")",
":",
"return",
"iter",
"(",
"nx",
".",
"topological_sort",
"(",
"self",
".",
"graph",
".",
"subgraph",
"(",
"{",
"term",
"for",
"term",
",",
"refcount",
"in",
"refcounts",
".",
"items",
"(",
")",
"if",
"refcount",
">",
"0",
"}",
",",
")",
",",
")",
")"
] | Return a topologically-sorted iterator over the terms in ``self`` which
need to be computed. | [
"Return",
"a",
"topologically",
"-",
"sorted",
"iterator",
"over",
"the",
"terms",
"in",
"self",
"which",
"need",
"to",
"be",
"computed",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L110-L119 |
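A toy version of the same computation: restrict the graph to terms whose refcount is still positive, then topologically sort the survivors (networkx is the same dependency the class itself uses):
import networkx as nx
g = nx.DiGraph([('load', 'factor'), ('factor', 'output')])
refcounts = {'load': 0, 'factor': 1, 'output': 1}   # 'load' already computed
live = {t for t, rc in refcounts.items() if rc > 0}
print(list(nx.topological_sort(g.subgraph(live))))  # ['factor', 'output']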
26,060 | quantopian/zipline | zipline/pipeline/graph.py | TermGraph.initial_refcounts | def initial_refcounts(self, initial_terms):
"""
Calculate initial refcounts for execution of this graph.
Parameters
----------
initial_terms : iterable[Term]
An iterable of terms that were pre-computed before graph execution.
Each node starts with a refcount equal to its outdegree, and output
nodes get one extra reference to ensure that they're still in the graph
at the end of execution.
"""
refcounts = self.graph.out_degree()
for t in self.outputs.values():
refcounts[t] += 1
for t in initial_terms:
self._decref_dependencies_recursive(t, refcounts, set())
return refcounts | python | def initial_refcounts(self, initial_terms):
"""
Calculate initial refcounts for execution of this graph.
Parameters
----------
initial_terms : iterable[Term]
An iterable of terms that were pre-computed before graph execution.
Each node starts with a refcount equal to its outdegree, and output
nodes get one extra reference to ensure that they're still in the graph
at the end of execution.
"""
refcounts = self.graph.out_degree()
for t in self.outputs.values():
refcounts[t] += 1
for t in initial_terms:
self._decref_dependencies_recursive(t, refcounts, set())
return refcounts | [
"def",
"initial_refcounts",
"(",
"self",
",",
"initial_terms",
")",
":",
"refcounts",
"=",
"self",
".",
"graph",
".",
"out_degree",
"(",
")",
"for",
"t",
"in",
"self",
".",
"outputs",
".",
"values",
"(",
")",
":",
"refcounts",
"[",
"t",
"]",
"+=",
"1",
"for",
"t",
"in",
"initial_terms",
":",
"self",
".",
"_decref_dependencies_recursive",
"(",
"t",
",",
"refcounts",
",",
"set",
"(",
")",
")",
"return",
"refcounts"
] | Calculate initial refcounts for execution of this graph.
Parameters
----------
initial_terms : iterable[Term]
An iterable of terms that were pre-computed before graph execution.
Each node starts with a refcount equal to its outdegree, and output
nodes get one extra reference to ensure that they're still in the graph
at the end of execution. | [
"Calculate",
"initial",
"refcounts",
"for",
"execution",
"of",
"this",
"graph",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L143-L163 |
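The initialization rule, reduced to a toy graph: each node starts at its out-degree, and every declared output gets one extra reference so it survives to the end of execution:
import networkx as nx
g = nx.DiGraph([('data', 'factor'), ('factor', 'out')])
refcounts = {n: g.out_degree(n) for n in g}
refcounts['out'] += 1     # 'out' is a pipeline output
print(refcounts)          # {'data': 1, 'factor': 1, 'out': 1}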
26,061 | quantopian/zipline | zipline/pipeline/graph.py | TermGraph._decref_dependencies_recursive | def _decref_dependencies_recursive(self, term, refcounts, garbage):
"""
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
"""
# Edges are tuples of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
self._decref_dependencies_recursive(parent, refcounts, garbage) | python | def _decref_dependencies_recursive(self, term, refcounts, garbage):
"""
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
"""
# Edges are tuples of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
self._decref_dependencies_recursive(parent, refcounts, garbage) | [
"def",
"_decref_dependencies_recursive",
"(",
"self",
",",
"term",
",",
"refcounts",
",",
"garbage",
")",
":",
"# Edges are tuple of (from, to).",
"for",
"parent",
",",
"_",
"in",
"self",
".",
"graph",
".",
"in_edges",
"(",
"[",
"term",
"]",
")",
":",
"refcounts",
"[",
"parent",
"]",
"-=",
"1",
"# No one else depends on this term. Remove it from the",
"# workspace to conserve memory.",
"if",
"refcounts",
"[",
"parent",
"]",
"==",
"0",
":",
"garbage",
".",
"add",
"(",
"parent",
")",
"self",
".",
"_decref_dependencies_recursive",
"(",
"parent",
",",
"refcounts",
",",
"garbage",
")"
] | Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies` | [
"Decrement",
"terms",
"recursively",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L165-L182 |
26,062 | quantopian/zipline | zipline/pipeline/graph.py | TermGraph.decref_dependencies | def decref_dependencies(self, term, refcounts):
"""
Decrement in-edges for ``term`` after computation.
Parameters
----------
term : zipline.pipeline.Term
The term whose parents should be decref'ed.
refcounts : dict[Term -> int]
Dictionary of refcounts.
Return
------
garbage : set[Term]
Terms whose refcounts hit zero after decrefing.
"""
garbage = set()
# Edges are tuples of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
return garbage | python | def decref_dependencies(self, term, refcounts):
"""
Decrement in-edges for ``term`` after computation.
Parameters
----------
term : zipline.pipeline.Term
The term whose parents should be decref'ed.
refcounts : dict[Term -> int]
Dictionary of refcounts.
Return
------
garbage : set[Term]
Terms whose refcounts hit zero after decrefing.
"""
garbage = set()
# Edges are tuples of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
return garbage | [
"def",
"decref_dependencies",
"(",
"self",
",",
"term",
",",
"refcounts",
")",
":",
"garbage",
"=",
"set",
"(",
")",
"# Edges are tuple of (from, to).",
"for",
"parent",
",",
"_",
"in",
"self",
".",
"graph",
".",
"in_edges",
"(",
"[",
"term",
"]",
")",
":",
"refcounts",
"[",
"parent",
"]",
"-=",
"1",
"# No one else depends on this term. Remove it from the",
"# workspace to conserve memory.",
"if",
"refcounts",
"[",
"parent",
"]",
"==",
"0",
":",
"garbage",
".",
"add",
"(",
"parent",
")",
"return",
"garbage"
] | Decrement in-edges for ``term`` after computation.
Parameters
----------
term : zipline.pipeline.Term
The term whose parents should be decref'ed.
refcounts : dict[Term -> int]
Dictionary of refcounts.
Return
------
garbage : set[Term]
Terms whose refcounts hit zero after decrefing. | [
"Decrement",
"in",
"-",
"edges",
"for",
"term",
"after",
"computation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L184-L208 |
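A toy decref pass mirroring the loop above: finishing 'factor' releases one reference on each in-edge parent, and any parent that hits zero is returned as garbage:
import networkx as nx
g = nx.DiGraph([('data', 'factor')])
refcounts, garbage = {'data': 1, 'factor': 1}, set()
for parent, _ in g.in_edges(['factor']):
    refcounts[parent] -= 1
    if refcounts[parent] == 0:
        garbage.add(parent)
print(garbage)   # {'data'}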
26,063 | quantopian/zipline | zipline/pipeline/graph.py | ExecutionPlan._ensure_extra_rows | def _ensure_extra_rows(self, term, N):
"""
Ensure that we're going to compute at least N extra rows of `term`.
"""
attrs = self.graph.node[term]
attrs['extra_rows'] = max(N, attrs.get('extra_rows', 0)) | python | def _ensure_extra_rows(self, term, N):
"""
Ensure that we're going to compute at least N extra rows of `term`.
"""
attrs = self.graph.node[term]
attrs['extra_rows'] = max(N, attrs.get('extra_rows', 0)) | [
"def",
"_ensure_extra_rows",
"(",
"self",
",",
"term",
",",
"N",
")",
":",
"attrs",
"=",
"self",
".",
"graph",
".",
"node",
"[",
"term",
"]",
"attrs",
"[",
"'extra_rows'",
"]",
"=",
"max",
"(",
"N",
",",
"attrs",
".",
"get",
"(",
"'extra_rows'",
",",
"0",
")",
")"
] | Ensure that we're going to compute at least N extra rows of `term`. | [
"Ensure",
"that",
"we",
"re",
"going",
"to",
"compute",
"at",
"least",
"N",
"extra",
"rows",
"of",
"term",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L444-L449 |
26,064 | quantopian/zipline | zipline/pipeline/graph.py | ExecutionPlan.mask_and_dates_for_term | def mask_and_dates_for_term(self,
term,
root_mask_term,
workspace,
all_dates):
"""
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
"""
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]
# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
self.extra_rows[root_mask_term] - self.extra_rows[term]
)
return workspace[mask][mask_offset:], all_dates[dates_offset:] | python | def mask_and_dates_for_term(self,
term,
root_mask_term,
workspace,
all_dates):
"""
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
"""
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]
# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
self.extra_rows[root_mask_term] - self.extra_rows[term]
)
return workspace[mask][mask_offset:], all_dates[dates_offset:] | [
"def",
"mask_and_dates_for_term",
"(",
"self",
",",
"term",
",",
"root_mask_term",
",",
"workspace",
",",
"all_dates",
")",
":",
"mask",
"=",
"term",
".",
"mask",
"mask_offset",
"=",
"self",
".",
"extra_rows",
"[",
"mask",
"]",
"-",
"self",
".",
"extra_rows",
"[",
"term",
"]",
"# This offset is computed against root_mask_term because that is what",
"# determines the shape of the top-level dates array.",
"dates_offset",
"=",
"(",
"self",
".",
"extra_rows",
"[",
"root_mask_term",
"]",
"-",
"self",
".",
"extra_rows",
"[",
"term",
"]",
")",
"return",
"workspace",
"[",
"mask",
"]",
"[",
"mask_offset",
":",
"]",
",",
"all_dates",
"[",
"dates_offset",
":",
"]"
] | Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term. | [
"Load",
"mask",
"and",
"mask",
"row",
"labels",
"for",
"term",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L451-L486 |
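A numeric sketch of the offset arithmetic; the extra-row counts are made up for illustration:
extra_rows = {'root': 10, 'mask': 10, 'term': 3}        # hypothetical values
mask_offset = extra_rows['mask'] - extra_rows['term']   # 7
dates_offset = extra_rows['root'] - extra_rows['term']  # 7
# a term needing only 3 extra rows skips the first 7 of the 10 padding rows
print(mask_offset, dates_offset)                        # 7 7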
26,065 | quantopian/zipline | zipline/pipeline/graph.py | ExecutionPlan._assert_all_loadable_terms_specialized_to | def _assert_all_loadable_terms_specialized_to(self, domain):
"""Make sure that we've specialized all loadable terms in the graph.
"""
for term in self.graph.node:
if isinstance(term, LoadableTerm):
assert term.domain is domain | python | def _assert_all_loadable_terms_specialized_to(self, domain):
"""Make sure that we've specialized all loadable terms in the graph.
"""
for term in self.graph.node:
if isinstance(term, LoadableTerm):
assert term.domain is domain | [
"def",
"_assert_all_loadable_terms_specialized_to",
"(",
"self",
",",
"domain",
")",
":",
"for",
"term",
"in",
"self",
".",
"graph",
".",
"node",
":",
"if",
"isinstance",
"(",
"term",
",",
"LoadableTerm",
")",
":",
"assert",
"term",
".",
"domain",
"is",
"domain"
] | Make sure that we've specialized all loadable terms in the graph. | [
"Make",
"sure",
"that",
"we",
"ve",
"specialized",
"all",
"loadable",
"terms",
"in",
"the",
"graph",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L488-L493 |
26,066 | quantopian/zipline | setup.py | window_specialization | def window_specialization(typename):
"""Make an extension for an AdjustedArrayWindow specialization."""
return Extension(
'zipline.lib._{name}window'.format(name=typename),
['zipline/lib/_{name}window.pyx'.format(name=typename)],
depends=['zipline/lib/_windowtemplate.pxi'],
) | python | def window_specialization(typename):
"""Make an extension for an AdjustedArrayWindow specialization."""
return Extension(
'zipline.lib._{name}window'.format(name=typename),
['zipline/lib/_{name}window.pyx'.format(name=typename)],
depends=['zipline/lib/_windowtemplate.pxi'],
) | [
"def",
"window_specialization",
"(",
"typename",
")",
":",
"return",
"Extension",
"(",
"'zipline.lib._{name}window'",
".",
"format",
"(",
"name",
"=",
"typename",
")",
",",
"[",
"'zipline/lib/_{name}window.pyx'",
".",
"format",
"(",
"name",
"=",
"typename",
")",
"]",
",",
"depends",
"=",
"[",
"'zipline/lib/_windowtemplate.pxi'",
"]",
",",
")"
] | Make an extension for an AdjustedArrayWindow specialization. | [
"Make",
"an",
"extension",
"for",
"an",
"AdjustedArrayWindow",
"specialization",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/setup.py#L81-L87 |
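Each call yields one Cython Extension object; 'float64' is one of the typenames zipline's setup plausibly iterates over, and the function is assumed to be in scope (it lives in setup.py itself):
ext = window_specialization('float64')
print(ext.name)      # zipline.lib._float64window
print(ext.sources)   # ['zipline/lib/_float64window.pyx']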
26,067 | quantopian/zipline | setup.py | read_requirements | def read_requirements(path,
strict_bounds,
conda_format=False,
filter_names=None):
"""
Read a requirements.txt file, expressed as a path relative to Zipline root.
Returns requirements with the pinned versions as lower bounds
if `strict_bounds` is falsey.
"""
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
reqs = _filter_requirements(f.readlines(), filter_names=filter_names,
filter_sys_version=not conda_format)
if not strict_bounds:
reqs = map(_with_bounds, reqs)
if conda_format:
reqs = map(_conda_format, reqs)
return list(reqs) | python | def read_requirements(path,
strict_bounds,
conda_format=False,
filter_names=None):
"""
Read a requirements.txt file, expressed as a path relative to Zipline root.
Returns requirements with the pinned versions as lower bounds
if `strict_bounds` is falsey.
"""
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
reqs = _filter_requirements(f.readlines(), filter_names=filter_names,
filter_sys_version=not conda_format)
if not strict_bounds:
reqs = map(_with_bounds, reqs)
if conda_format:
reqs = map(_conda_format, reqs)
return list(reqs) | [
"def",
"read_requirements",
"(",
"path",
",",
"strict_bounds",
",",
"conda_format",
"=",
"False",
",",
"filter_names",
"=",
"None",
")",
":",
"real_path",
"=",
"join",
"(",
"dirname",
"(",
"abspath",
"(",
"__file__",
")",
")",
",",
"path",
")",
"with",
"open",
"(",
"real_path",
")",
"as",
"f",
":",
"reqs",
"=",
"_filter_requirements",
"(",
"f",
".",
"readlines",
"(",
")",
",",
"filter_names",
"=",
"filter_names",
",",
"filter_sys_version",
"=",
"not",
"conda_format",
")",
"if",
"not",
"strict_bounds",
":",
"reqs",
"=",
"map",
"(",
"_with_bounds",
",",
"reqs",
")",
"if",
"conda_format",
":",
"reqs",
"=",
"map",
"(",
"_conda_format",
",",
"reqs",
")",
"return",
"list",
"(",
"reqs",
")"
] | Read a requirements.txt file, expressed as a path relative to Zipline root.
Returns requirements with the pinned versions as lower bounds
if `strict_bounds` is falsey. | [
"Read",
"a",
"requirements",
".",
"txt",
"file",
"expressed",
"as",
"a",
"path",
"relative",
"to",
"Zipline",
"root",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/setup.py#L217-L238 |
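A hedged usage sketch; the path and the printed pins are illustrative, and strict_bounds=False is assumed to relax '==' pins into lower bounds via _with_bounds:
reqs = read_requirements('etc/requirements.txt', strict_bounds=False)
print(reqs[:2])   # e.g. ['numpy>=1.11.3', 'pandas>=0.18.1']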
26,068 | quantopian/zipline | zipline/utils/events.py | ensure_utc | def ensure_utc(time, tz='UTC'):
"""
Normalize a time. If the time is tz-naive, assume it is UTC.
"""
if not time.tzinfo:
time = time.replace(tzinfo=pytz.timezone(tz))
return time.replace(tzinfo=pytz.utc) | python | def ensure_utc(time, tz='UTC'):
"""
Normalize a time. If the time is tz-naive, assume it is UTC.
"""
if not time.tzinfo:
time = time.replace(tzinfo=pytz.timezone(tz))
return time.replace(tzinfo=pytz.utc) | [
"def",
"ensure_utc",
"(",
"time",
",",
"tz",
"=",
"'UTC'",
")",
":",
"if",
"not",
"time",
".",
"tzinfo",
":",
"time",
"=",
"time",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"timezone",
"(",
"tz",
")",
")",
"return",
"time",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")"
] | Normalize a time. If the time is tz-naive, assume it is UTC. | [
"Normalize",
"a",
"time",
".",
"If",
"the",
"time",
"is",
"tz",
"-",
"naive",
"assume",
"it",
"is",
"UTC",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L72-L78 |
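A short sketch; note that replace() relabels tzinfo rather than converting the wall clock, so an aware input keeps its clock reading but comes back stamped UTC:
import datetime
import pytz
from zipline.utils.events import ensure_utc
print(ensure_utc(datetime.time(9, 31)))    # 09:31:00+00:00, naive assumed UTC
aware = datetime.time(9, 31, tzinfo=pytz.timezone('US/Eastern'))
print(ensure_utc(aware))                   # 09:31:00+00:00, relabeled not converted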
26,069 | quantopian/zipline | zipline/utils/events.py | _build_offset | def _build_offset(offset, kwargs, default):
"""
Builds the offset argument for event rules.
"""
if offset is None:
if not kwargs:
return default # use the default.
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError('Cannot pass kwargs and an offset')
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords") | python | def _build_offset(offset, kwargs, default):
"""
Builds the offset argument for event rules.
"""
if offset is None:
if not kwargs:
return default # use the default.
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError('Cannot pass kwargs and an offset')
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords") | [
"def",
"_build_offset",
"(",
"offset",
",",
"kwargs",
",",
"default",
")",
":",
"if",
"offset",
"is",
"None",
":",
"if",
"not",
"kwargs",
":",
"return",
"default",
"# use the default.",
"else",
":",
"return",
"_td_check",
"(",
"datetime",
".",
"timedelta",
"(",
"*",
"*",
"kwargs",
")",
")",
"elif",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Cannot pass kwargs and an offset'",
")",
"elif",
"isinstance",
"(",
"offset",
",",
"datetime",
".",
"timedelta",
")",
":",
"return",
"_td_check",
"(",
"offset",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Must pass 'hours' and/or 'minutes' as keywords\"",
")"
] | Builds the offset argument for event rules. | [
"Builds",
"the",
"offset",
"argument",
"for",
"event",
"rules",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L108-L122 |
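The three accepted shapes, one per branch above (assuming _td_check accepts offsets of these sizes):
import datetime
from zipline.utils.events import _build_offset
default = datetime.timedelta(minutes=1)
print(_build_offset(None, {}, default))                  # falls back to default
print(_build_offset(None, {'hours': 1}, default))        # built from kwargs
print(_build_offset(datetime.timedelta(minutes=5), {}, default))  # passed through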
26,070 | quantopian/zipline | zipline/utils/events.py | _build_date | def _build_date(date, kwargs):
"""
Builds the date argument for event rules.
"""
if date is None:
if not kwargs:
raise ValueError('Must pass a date or kwargs')
else:
return datetime.date(**kwargs)
elif kwargs:
raise ValueError('Cannot pass kwargs and a date')
else:
return date | python | def _build_date(date, kwargs):
"""
Builds the date argument for event rules.
"""
if date is None:
if not kwargs:
raise ValueError('Must pass a date or kwargs')
else:
return datetime.date(**kwargs)
elif kwargs:
raise ValueError('Cannot pass kwargs and a date')
else:
return date | [
"def",
"_build_date",
"(",
"date",
",",
"kwargs",
")",
":",
"if",
"date",
"is",
"None",
":",
"if",
"not",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Must pass a date or kwargs'",
")",
"else",
":",
"return",
"datetime",
".",
"date",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Cannot pass kwargs and a date'",
")",
"else",
":",
"return",
"date"
] | Builds the date argument for event rules. | [
"Builds",
"the",
"date",
"argument",
"for",
"event",
"rules",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L125-L138 |
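The function accepts either an explicit date or date-part kwargs, never both; a minimal check of the two valid paths:

```python
import datetime

def _build_date(date, kwargs):
    # Same logic as the function above, runnable standalone.
    if date is None:
        if not kwargs:
            raise ValueError('Must pass a date or kwargs')
        return datetime.date(**kwargs)
    elif kwargs:
        raise ValueError('Cannot pass kwargs and a date')
    return date

print(_build_date(None, {'year': 2017, 'month': 1, 'day': 3}))  # 2017-01-03
print(_build_date(datetime.date(2017, 1, 3), {}))               # passthrough
```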
26,071 | quantopian/zipline | zipline/utils/events.py | _build_time | def _build_time(time, kwargs):
"""
Builds the time argument for event rules.
"""
tz = kwargs.pop('tz', 'UTC')
if time:
if kwargs:
raise ValueError('Cannot pass kwargs and a time')
else:
return ensure_utc(time, tz)
elif not kwargs:
raise ValueError('Must pass a time or kwargs')
else:
return datetime.time(**kwargs) | python | def _build_time(time, kwargs):
"""
Builds the time argument for event rules.
"""
tz = kwargs.pop('tz', 'UTC')
if time:
if kwargs:
raise ValueError('Cannot pass kwargs and a time')
else:
return ensure_utc(time, tz)
elif not kwargs:
raise ValueError('Must pass a time or kwargs')
else:
return datetime.time(**kwargs) | [
"def",
"_build_time",
"(",
"time",
",",
"kwargs",
")",
":",
"tz",
"=",
"kwargs",
".",
"pop",
"(",
"'tz'",
",",
"'UTC'",
")",
"if",
"time",
":",
"if",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Cannot pass kwargs and a time'",
")",
"else",
":",
"return",
"ensure_utc",
"(",
"time",
",",
"tz",
")",
"elif",
"not",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Must pass a time or kwargs'",
")",
"else",
":",
"return",
"datetime",
".",
"time",
"(",
"*",
"*",
"kwargs",
")"
] | Builds the time argument for event rules. | [
"Builds",
"the",
"time",
"argument",
"for",
"event",
"rules",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L141-L154 |
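One detail worth calling out: in the kwargs branch, the popped `tz` is not applied, so the `datetime.time` built from kwargs is naive; only the explicit-time branch routes through `ensure_utc`. A toy check of that branch:

```python
import datetime

def _build_time_kwargs_branch(kwargs):
    # Only the kwargs branch of _build_time; the explicit-time branch
    # defers to ensure_utc shown earlier.
    tz = kwargs.pop('tz', 'UTC')
    if not kwargs:
        raise ValueError('Must pass a time or kwargs')
    return datetime.time(**kwargs), tz

t, tz = _build_time_kwargs_branch({'hour': 9, 'minute': 31, 'tz': 'US/Eastern'})
print(t, t.tzinfo, tz)  # 09:31:00 None US/Eastern -- tz popped, not applied
```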
26,072 | quantopian/zipline | zipline/utils/events.py | lossless_float_to_int | def lossless_float_to_int(funcname, func, argname, arg):
"""
A preprocessor that coerces integral floats to ints.
Receipt of non-integral floats raises a TypeError.
"""
if not isinstance(arg, float):
return arg
arg_as_int = int(arg)
if arg == arg_as_int:
warnings.warn(
"{f} expected an int for argument {name!r}, but got float {arg}."
" Coercing to int.".format(
f=funcname,
name=argname,
arg=arg,
),
)
return arg_as_int
raise TypeError(arg) | python | def lossless_float_to_int(funcname, func, argname, arg):
"""
A preprocessor that coerces integral floats to ints.
Receipt of non-integral floats raises a TypeError.
"""
if not isinstance(arg, float):
return arg
arg_as_int = int(arg)
if arg == arg_as_int:
warnings.warn(
"{f} expected an int for argument {name!r}, but got float {arg}."
" Coercing to int.".format(
f=funcname,
name=argname,
arg=arg,
),
)
return arg_as_int
raise TypeError(arg) | [
"def",
"lossless_float_to_int",
"(",
"funcname",
",",
"func",
",",
"argname",
",",
"arg",
")",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"float",
")",
":",
"return",
"arg",
"arg_as_int",
"=",
"int",
"(",
"arg",
")",
"if",
"arg",
"==",
"arg_as_int",
":",
"warnings",
".",
"warn",
"(",
"\"{f} expected an int for argument {name!r}, but got float {arg}.\"",
"\" Coercing to int.\"",
".",
"format",
"(",
"f",
"=",
"funcname",
",",
"name",
"=",
"argname",
",",
"arg",
"=",
"arg",
",",
")",
",",
")",
"return",
"arg_as_int",
"raise",
"TypeError",
"(",
"arg",
")"
] | A preprocessor that coerces integral floats to ints.
Receipt of non-integral floats raises a TypeError. | [
"A",
"preprocessor",
"that",
"coerces",
"integral",
"floats",
"to",
"ints",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L158-L179 |
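A standalone run of the preprocessor, showing the coercion path and the rejection path:

```python
import warnings

def lossless_float_to_int(funcname, func, argname, arg):
    # Same logic as the preprocessor above, runnable standalone.
    if not isinstance(arg, float):
        return arg
    arg_as_int = int(arg)
    if arg == arg_as_int:
        warnings.warn(
            "%s expected an int for argument %r, but got float %s."
            " Coercing to int." % (funcname, argname, arg),
        )
        return arg_as_int
    raise TypeError(arg)

print(lossless_float_to_int('f', None, 'n', 3.0))   # 3, with a warning
try:
    lossless_float_to_int('f', None, 'n', 3.5)      # not integral
except TypeError as e:
    print('rejected:', e)
```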
26,073 | quantopian/zipline | zipline/utils/events.py | EventManager.add_event | def add_event(self, event, prepend=False):
"""
Adds an event to the manager.
"""
if prepend:
self._events.insert(0, event)
else:
self._events.append(event) | python | def add_event(self, event, prepend=False):
"""
Adds an event to the manager.
"""
if prepend:
self._events.insert(0, event)
else:
self._events.append(event) | [
"def",
"add_event",
"(",
"self",
",",
"event",
",",
"prepend",
"=",
"False",
")",
":",
"if",
"prepend",
":",
"self",
".",
"_events",
".",
"insert",
"(",
"0",
",",
"event",
")",
"else",
":",
"self",
".",
"_events",
".",
"append",
"(",
"event",
")"
] | Adds an event to the manager. | [
"Adds",
"an",
"event",
"to",
"the",
"manager",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L201-L208 |
26,074 | quantopian/zipline | zipline/utils/events.py | Event.handle_data | def handle_data(self, context, data, dt):
"""
Calls the callable only when the rule is triggered.
"""
if self.rule.should_trigger(dt):
self.callback(context, data) | python | def handle_data(self, context, data, dt):
"""
Calls the callable only when the rule is triggered.
"""
if self.rule.should_trigger(dt):
self.callback(context, data) | [
"def",
"handle_data",
"(",
"self",
",",
"context",
",",
"data",
",",
"dt",
")",
":",
"if",
"self",
".",
"rule",
".",
"should_trigger",
"(",
"dt",
")",
":",
"self",
".",
"callback",
"(",
"context",
",",
"data",
")"
] | Calls the callable only when the rule is triggered. | [
"Calls",
"the",
"callable",
"only",
"when",
"the",
"rule",
"is",
"triggered",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L230-L235 |
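A self-contained pairing of a rule and an `Event` in the spirit of the class above; the `Always` rule is an assumption standing in for zipline's concrete `EventRule` subclasses:

```python
import datetime

class Always:
    # Hypothetical rule: fires on every dt.
    def should_trigger(self, dt):
        return True

class Event:
    # Mirrors the Event.handle_data contract shown above.
    def __init__(self, rule, callback):
        self.rule, self.callback = rule, callback

    def handle_data(self, context, data, dt):
        if self.rule.should_trigger(dt):
            self.callback(context, data)

calls = []
ev = Event(Always(), lambda context, data: calls.append((context, data)))
ev.handle_data('ctx', 'bar', datetime.datetime(2017, 1, 3, 14, 31))
print(calls)  # [('ctx', 'bar')] -- the rule fired, so the callback ran
```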
26,075 | quantopian/zipline | zipline/utils/events.py | ComposedRule.should_trigger | def should_trigger(self, dt):
"""
Composes the two rules with a lazy composer.
"""
return self.composer(
self.first.should_trigger,
self.second.should_trigger,
dt
) | python | def should_trigger(self, dt):
"""
Composes the two rules with a lazy composer.
"""
return self.composer(
self.first.should_trigger,
self.second.should_trigger,
dt
) | [
"def",
"should_trigger",
"(",
"self",
",",
"dt",
")",
":",
"return",
"self",
".",
"composer",
"(",
"self",
".",
"first",
".",
"should_trigger",
",",
"self",
".",
"second",
".",
"should_trigger",
",",
"dt",
")"
] | Composes the two rules with a lazy composer. | [
"Composes",
"the",
"two",
"rules",
"with",
"a",
"lazy",
"composer",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/events.py#L298-L306 |
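The composer is injected, so its laziness lives outside this method; a sketch of an and-style composer (an assumption, in the spirit of the ones ComposedRule is used with) showing the short-circuit:

```python
# The second rule is only consulted when the first one passes.
def lazy_and(first_should_trigger, second_should_trigger, dt):
    return first_should_trigger(dt) and second_should_trigger(dt)

class CountingRule:
    def __init__(self, result):
        self.result = result
        self.calls = 0

    def should_trigger(self, dt):
        self.calls += 1
        return self.result

first, second = CountingRule(False), CountingRule(True)
print(lazy_and(first.should_trigger, second.should_trigger, None))  # False
print(second.calls)  # 0 -- short-circuited; the second rule never ran
```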
26,076 | quantopian/zipline | zipline/data/bcolz_daily_bars.py | winsorise_uint32 | def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
*columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df | python | def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
*columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df | [
"def",
"winsorise_uint32",
"(",
"df",
",",
"invalid_data_behavior",
",",
"column",
",",
"*",
"columns",
")",
":",
"columns",
"=",
"list",
"(",
"(",
"column",
",",
")",
"+",
"columns",
")",
"mask",
"=",
"df",
"[",
"columns",
"]",
">",
"UINT32_MAX",
"if",
"invalid_data_behavior",
"!=",
"'ignore'",
":",
"mask",
"|=",
"df",
"[",
"columns",
"]",
".",
"isnull",
"(",
")",
"else",
":",
"# we are not going to generate a warning or error for this so just use",
"# nan_to_num",
"df",
"[",
"columns",
"]",
"=",
"np",
".",
"nan_to_num",
"(",
"df",
"[",
"columns",
"]",
")",
"mv",
"=",
"mask",
".",
"values",
"if",
"mv",
".",
"any",
"(",
")",
":",
"if",
"invalid_data_behavior",
"==",
"'raise'",
":",
"raise",
"ValueError",
"(",
"'%d values out of bounds for uint32: %r'",
"%",
"(",
"mv",
".",
"sum",
"(",
")",
",",
"df",
"[",
"mask",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]",
",",
")",
",",
")",
"if",
"invalid_data_behavior",
"==",
"'warn'",
":",
"warnings",
".",
"warn",
"(",
"'Ignoring %d values because they are out of bounds for'",
"' uint32: %r'",
"%",
"(",
"mv",
".",
"sum",
"(",
")",
",",
"df",
"[",
"mask",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]",
",",
")",
",",
"stacklevel",
"=",
"3",
",",
"# one extra frame for `expect_element`",
")",
"df",
"[",
"mask",
"]",
"=",
"0",
"return",
"df"
] | Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
*columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out. | [
"Drops",
"any",
"record",
"where",
"a",
"value",
"would",
"not",
"fit",
"into",
"a",
"uint32",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L69-L114 |
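A small demonstration of the masking step at the heart of the function, with one in-bounds and one out-of-bounds price:

```python
import numpy as np
import pandas as pd

UINT32_MAX = np.iinfo(np.uint32).max

# The same mask-and-zero step winsorise_uint32 applies to offending rows.
df = pd.DataFrame({'close': [100.0, float(UINT32_MAX) + 1.0]})
mask = df[['close']] > UINT32_MAX
df[mask] = 0
print(df)  # the oversized value is zeroed; the valid one is untouched
```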
26,077 | quantopian/zipline | zipline/data/bcolz_daily_bars.py | BcolzDailyBarWriter.write_csvs | def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
) | python | def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
) | [
"def",
"write_csvs",
"(",
"self",
",",
"asset_map",
",",
"show_progress",
"=",
"False",
",",
"invalid_data_behavior",
"=",
"'warn'",
")",
":",
"read",
"=",
"partial",
"(",
"read_csv",
",",
"parse_dates",
"=",
"[",
"'day'",
"]",
",",
"index_col",
"=",
"'day'",
",",
"dtype",
"=",
"self",
".",
"_csv_dtypes",
",",
")",
"return",
"self",
".",
"write",
"(",
"(",
"(",
"asset",
",",
"read",
"(",
"path",
")",
")",
"for",
"asset",
",",
"path",
"in",
"iteritems",
"(",
"asset_map",
")",
")",
",",
"assets",
"=",
"viewkeys",
"(",
"asset_map",
")",
",",
"show_progress",
"=",
"show_progress",
",",
"invalid_data_behavior",
"=",
"invalid_data_behavior",
",",
")"
] | Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32. | [
"Read",
"CSVs",
"as",
"DataFrames",
"from",
"our",
"asset",
"map",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L209-L237 |
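A hypothetical invocation: the output path, sids, and CSV file names below are placeholders, and the writer is assumed to be constructed with a target path, a trading calendar, and the session range. Each CSV is expected to have a `day` column plus the OHLCV columns:

```python
import pandas as pd
from trading_calendars import get_calendar
from zipline.data.bcolz_daily_bars import BcolzDailyBarWriter

calendar = get_calendar('XNYS')
start = pd.Timestamp('2017-01-03', tz='UTC')
end = pd.Timestamp('2017-12-29', tz='UTC')

writer = BcolzDailyBarWriter('daily_bars.bcolz', calendar, start, end)
writer.write_csvs(
    {1: 'AAPL.csv', 2: 'MSFT.csv'},  # sid -> CSV path (placeholders)
    show_progress=True,
    invalid_data_behavior='warn',    # warn and zero out-of-range values
)
```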
26,078 | quantopian/zipline | zipline/data/bcolz_daily_bars.py | BcolzDailyBarReader._compute_slices | def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
) | python | def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
) | [
"def",
"_compute_slices",
"(",
"self",
",",
"start_idx",
",",
"end_idx",
",",
"assets",
")",
":",
"# The core implementation of the logic here is implemented in Cython",
"# for efficiency.",
"return",
"_compute_row_slices",
"(",
"self",
".",
"_first_rows",
",",
"self",
".",
"_last_rows",
",",
"self",
".",
"_calendar_offsets",
",",
"start_idx",
",",
"end_idx",
",",
"assets",
",",
")"
] | Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist. | [
"Compute",
"the",
"raw",
"row",
"indices",
"to",
"load",
"for",
"each",
"asset",
"on",
"a",
"query",
"for",
"the",
"given",
"dates",
"after",
"applying",
"a",
"shift",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L530-L570 |
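A toy illustration of the `offsets` semantics documented above, i.e. the number of leading query sessions on which each asset did not yet exist (the session indices here are made up; the real computation happens in Cython):

```python
import numpy as np

query_start_session = 0                     # index of the first queried date
asset_first_sessions = np.array([0, 3, 7])  # first session per asset
offsets = np.maximum(asset_first_sessions - query_start_session, 0)
print(offsets)  # [0 3 7] -- asset 0 existed at the query start; the rest later
```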
26,079 | quantopian/zipline | zipline/data/bcolz_daily_bars.py | BcolzDailyBarReader._spot_col | def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
A name of an OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col | python | def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
A name of an OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col | [
"def",
"_spot_col",
"(",
"self",
",",
"colname",
")",
":",
"try",
":",
"col",
"=",
"self",
".",
"_spot_cols",
"[",
"colname",
"]",
"except",
"KeyError",
":",
"col",
"=",
"self",
".",
"_spot_cols",
"[",
"colname",
"]",
"=",
"self",
".",
"_table",
"[",
"colname",
"]",
"return",
"col"
] | Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
A name of an OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname. | [
"Get",
"the",
"colname",
"from",
"daily_bar_table",
"and",
"read",
"all",
"of",
"it",
"into",
"memory",
"caching",
"the",
"result",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bcolz_daily_bars.py#L598-L618 |
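The method is a read-through cache; the same pattern standalone, where the first access pulls the column from the table and later accesses hit the dict:

```python
class ColumnCache:
    def __init__(self, table):
        self._table = table
        self._spot_cols = {}

    def _spot_col(self, colname):
        try:
            col = self._spot_cols[colname]
        except KeyError:
            # First access: read from the table and remember the result.
            col = self._spot_cols[colname] = self._table[colname]
        return col

cache = ColumnCache({'close': [100, 101, 102]})
assert cache._spot_col('close') is cache._spot_col('close')  # cached object
```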
26,080 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.init_engine | def init_engine(self, get_loader):
"""
Construct and store a PipelineEngine from loader.
If get_loader is None, constructs an ExplodingPipelineEngine
"""
if get_loader is not None:
self.engine = SimplePipelineEngine(
get_loader,
self.asset_finder,
self.default_pipeline_domain(self.trading_calendar),
)
else:
self.engine = ExplodingPipelineEngine() | python | def init_engine(self, get_loader):
"""
Construct and store a PipelineEngine from loader.
If get_loader is None, constructs an ExplodingPipelineEngine
"""
if get_loader is not None:
self.engine = SimplePipelineEngine(
get_loader,
self.asset_finder,
self.default_pipeline_domain(self.trading_calendar),
)
else:
self.engine = ExplodingPipelineEngine() | [
"def",
"init_engine",
"(",
"self",
",",
"get_loader",
")",
":",
"if",
"get_loader",
"is",
"not",
"None",
":",
"self",
".",
"engine",
"=",
"SimplePipelineEngine",
"(",
"get_loader",
",",
"self",
".",
"asset_finder",
",",
"self",
".",
"default_pipeline_domain",
"(",
"self",
".",
"trading_calendar",
")",
",",
")",
"else",
":",
"self",
".",
"engine",
"=",
"ExplodingPipelineEngine",
"(",
")"
] | Construct and store a PipelineEngine from loader.
If get_loader is None, constructs an ExplodingPipelineEngine | [
"Construct",
"and",
"store",
"a",
"PipelineEngine",
"from",
"loader",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L408-L421 |
26,081 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.initialize | def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs) | python | def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs) | [
"def",
"initialize",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"ZiplineAPI",
"(",
"self",
")",
":",
"self",
".",
"_initialize",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Call self._initialize with `self` made available to Zipline API
functions. | [
"Call",
"self",
".",
"_initialize",
"with",
"self",
"made",
"available",
"to",
"Zipline",
"API",
"functions",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L423-L429 |
26,082 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm._create_clock | def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
trading_o_and_c = self.trading_calendar.schedule.ix[
self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False
if self.sim_params.data_frequency == 'minute':
market_opens = trading_o_and_c['market_open']
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
) | python | def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
trading_o_and_c = self.trading_calendar.schedule.ix[
self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False
if self.sim_params.data_frequency == 'minute':
market_opens = trading_o_and_c['market_open']
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
) | [
"def",
"_create_clock",
"(",
"self",
")",
":",
"trading_o_and_c",
"=",
"self",
".",
"trading_calendar",
".",
"schedule",
".",
"ix",
"[",
"self",
".",
"sim_params",
".",
"sessions",
"]",
"market_closes",
"=",
"trading_o_and_c",
"[",
"'market_close'",
"]",
"minutely_emission",
"=",
"False",
"if",
"self",
".",
"sim_params",
".",
"data_frequency",
"==",
"'minute'",
":",
"market_opens",
"=",
"trading_o_and_c",
"[",
"'market_open'",
"]",
"minutely_emission",
"=",
"self",
".",
"sim_params",
".",
"emission_rate",
"==",
"\"minute\"",
"# The calendar's execution times are the minutes over which we",
"# actually want to run the clock. Typically the execution times",
"# simply adhere to the market open and close times. In the case of",
"# the futures calendar, for example, we only want to simulate over",
"# a subset of the full 24 hour calendar, so the execution times",
"# dictate a market open time of 6:31am US/Eastern and a close of",
"# 5:00pm US/Eastern.",
"execution_opens",
"=",
"self",
".",
"trading_calendar",
".",
"execution_time_from_open",
"(",
"market_opens",
")",
"execution_closes",
"=",
"self",
".",
"trading_calendar",
".",
"execution_time_from_close",
"(",
"market_closes",
")",
"else",
":",
"# in daily mode, we want to have one bar per session, timestamped",
"# as the last minute of the session.",
"execution_closes",
"=",
"self",
".",
"trading_calendar",
".",
"execution_time_from_close",
"(",
"market_closes",
")",
"execution_opens",
"=",
"execution_closes",
"# FIXME generalize these values",
"before_trading_start_minutes",
"=",
"days_at_time",
"(",
"self",
".",
"sim_params",
".",
"sessions",
",",
"time",
"(",
"8",
",",
"45",
")",
",",
"\"US/Eastern\"",
")",
"return",
"MinuteSimulationClock",
"(",
"self",
".",
"sim_params",
".",
"sessions",
",",
"execution_opens",
",",
"execution_closes",
",",
"before_trading_start_minutes",
",",
"minute_emission",
"=",
"minutely_emission",
",",
")"
] | If the clock property is not set, then create one based on frequency. | [
"If",
"the",
"clock",
"property",
"is",
"not",
"set",
"then",
"create",
"one",
"based",
"on",
"frequency",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L482-L526 |
26,083 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.compute_eager_pipelines | def compute_eager_pipelines(self):
"""
Compute any pipelines attached with eager=True.
"""
for name, pipe in self._pipelines.items():
if pipe.eager:
self.pipeline_output(name) | python | def compute_eager_pipelines(self):
"""
Compute any pipelines attached with eager=True.
"""
for name, pipe in self._pipelines.items():
if pipe.eager:
self.pipeline_output(name) | [
"def",
"compute_eager_pipelines",
"(",
"self",
")",
":",
"for",
"name",
",",
"pipe",
"in",
"self",
".",
"_pipelines",
".",
"items",
"(",
")",
":",
"if",
"pipe",
".",
"eager",
":",
"self",
".",
"pipeline_output",
"(",
"name",
")"
] | Compute any pipelines attached with eager=True. | [
"Compute",
"any",
"pipelines",
"attached",
"with",
"eager",
"=",
"True",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L601-L607 |
26,084 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.run | def run(self, data_portal=None):
"""Run the algorithm.
"""
# HACK: I don't think we really want to support passing a data portal
# this late in the long term, but this is needed for now for backwards
# compat downstream.
if data_portal is not None:
self.data_portal = data_portal
self.asset_finder = data_portal.asset_finder
elif self.data_portal is None:
raise RuntimeError(
"No data portal in TradingAlgorithm.run().\n"
"Either pass a DataPortal to TradingAlgorithm() or to run()."
)
else:
assert self.asset_finder is not None, \
"Have data portal without asset_finder."
# Create zipline and loop through simulated_trading.
# Each iteration returns a perf dictionary
try:
perfs = []
for perf in self.get_generator():
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
finally:
self.data_portal = None
self.metrics_tracker = None
return daily_stats | python | def run(self, data_portal=None):
"""Run the algorithm.
"""
# HACK: I don't think we really want to support passing a data portal
# this late in the long term, but this is needed for now for backwards
# compat downstream.
if data_portal is not None:
self.data_portal = data_portal
self.asset_finder = data_portal.asset_finder
elif self.data_portal is None:
raise RuntimeError(
"No data portal in TradingAlgorithm.run().\n"
"Either pass a DataPortal to TradingAlgorithm() or to run()."
)
else:
assert self.asset_finder is not None, \
"Have data portal without asset_finder."
# Create zipline and loop through simulated_trading.
# Each iteration returns a perf dictionary
try:
perfs = []
for perf in self.get_generator():
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
finally:
self.data_portal = None
self.metrics_tracker = None
return daily_stats | [
"def",
"run",
"(",
"self",
",",
"data_portal",
"=",
"None",
")",
":",
"# HACK: I don't think we really want to support passing a data portal",
"# this late in the long term, but this is needed for now for backwards",
"# compat downstream.",
"if",
"data_portal",
"is",
"not",
"None",
":",
"self",
".",
"data_portal",
"=",
"data_portal",
"self",
".",
"asset_finder",
"=",
"data_portal",
".",
"asset_finder",
"elif",
"self",
".",
"data_portal",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"No data portal in TradingAlgorithm.run().\\n\"",
"\"Either pass a DataPortal to TradingAlgorithm() or to run().\"",
")",
"else",
":",
"assert",
"self",
".",
"asset_finder",
"is",
"not",
"None",
",",
"\"Have data portal without asset_finder.\"",
"# Create zipline and loop through simulated_trading.",
"# Each iteration returns a perf dictionary",
"try",
":",
"perfs",
"=",
"[",
"]",
"for",
"perf",
"in",
"self",
".",
"get_generator",
"(",
")",
":",
"perfs",
".",
"append",
"(",
"perf",
")",
"# convert perf dict to pandas dataframe",
"daily_stats",
"=",
"self",
".",
"_create_daily_stats",
"(",
"perfs",
")",
"self",
".",
"analyze",
"(",
"daily_stats",
")",
"finally",
":",
"self",
".",
"data_portal",
"=",
"None",
"self",
".",
"metrics_tracker",
"=",
"None",
"return",
"daily_stats"
] | Run the algorithm. | [
"Run",
"the",
"algorithm",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L617-L650 |
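In practice `run` is usually reached through the public `run_algorithm` helper, which builds the data portal for you. A minimal sketch, assuming a data bundle named `'quantopian-quandl'` has already been ingested:

```python
import pandas as pd
from zipline import run_algorithm

def initialize(context):
    pass

perf = run_algorithm(
    start=pd.Timestamp('2017-01-03', tz='utc'),
    end=pd.Timestamp('2017-12-29', tz='utc'),
    initialize=initialize,
    capital_base=100000,
    bundle='quantopian-quandl',  # assumes this bundle was ingested
)
print(perf.head())  # the daily stats DataFrame that run() returns
```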
26,085 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.calculate_capital_changes | def calculate_capital_changes(self, dt, emission_rate, is_interday,
portfolio_value_adjustment=0.0):
"""
If there is a capital change for a given dt, this means the change
occurs before `handle_data` on the given dt. In the case of the
change being a target value, the change will be computed on the
portfolio value according to prices at the given dt.
`portfolio_value_adjustment`, if specified, will be removed from the
portfolio_value of the cumulative performance when calculating deltas
from target capital changes.
"""
try:
capital_change = self.capital_changes[dt]
except KeyError:
return
self._sync_last_sale_prices()
if capital_change['type'] == 'target':
target = capital_change['value']
capital_change_amount = (
target -
(
self.portfolio.portfolio_value -
portfolio_value_adjustment
)
)
log.info('Processing capital change to target %s at %s. Capital '
'change delta is %s' % (target, dt,
capital_change_amount))
elif capital_change['type'] == 'delta':
target = None
capital_change_amount = capital_change['value']
log.info('Processing capital change of delta %s at %s'
% (capital_change_amount, dt))
else:
log.error("Capital change %s does not indicate a valid type "
"('target' or 'delta')" % capital_change)
return
self.capital_change_deltas.update({dt: capital_change_amount})
self.metrics_tracker.capital_change(capital_change_amount)
yield {
'capital_change':
{'date': dt,
'type': 'cash',
'target': target,
'delta': capital_change_amount}
} | python | def calculate_capital_changes(self, dt, emission_rate, is_interday,
portfolio_value_adjustment=0.0):
"""
If there is a capital change for a given dt, this means the change
occurs before `handle_data` on the given dt. In the case of the
change being a target value, the change will be computed on the
portfolio value according to prices at the given dt.
`portfolio_value_adjustment`, if specified, will be removed from the
portfolio_value of the cumulative performance when calculating deltas
from target capital changes.
"""
try:
capital_change = self.capital_changes[dt]
except KeyError:
return
self._sync_last_sale_prices()
if capital_change['type'] == 'target':
target = capital_change['value']
capital_change_amount = (
target -
(
self.portfolio.portfolio_value -
portfolio_value_adjustment
)
)
log.info('Processing capital change to target %s at %s. Capital '
'change delta is %s' % (target, dt,
capital_change_amount))
elif capital_change['type'] == 'delta':
target = None
capital_change_amount = capital_change['value']
log.info('Processing capital change of delta %s at %s'
% (capital_change_amount, dt))
else:
log.error("Capital change %s does not indicate a valid type "
"('target' or 'delta')" % capital_change)
return
self.capital_change_deltas.update({dt: capital_change_amount})
self.metrics_tracker.capital_change(capital_change_amount)
yield {
'capital_change':
{'date': dt,
'type': 'cash',
'target': target,
'delta': capital_change_amount}
} | [
"def",
"calculate_capital_changes",
"(",
"self",
",",
"dt",
",",
"emission_rate",
",",
"is_interday",
",",
"portfolio_value_adjustment",
"=",
"0.0",
")",
":",
"try",
":",
"capital_change",
"=",
"self",
".",
"capital_changes",
"[",
"dt",
"]",
"except",
"KeyError",
":",
"return",
"self",
".",
"_sync_last_sale_prices",
"(",
")",
"if",
"capital_change",
"[",
"'type'",
"]",
"==",
"'target'",
":",
"target",
"=",
"capital_change",
"[",
"'value'",
"]",
"capital_change_amount",
"=",
"(",
"target",
"-",
"(",
"self",
".",
"portfolio",
".",
"portfolio_value",
"-",
"portfolio_value_adjustment",
")",
")",
"log",
".",
"info",
"(",
"'Processing capital change to target %s at %s. Capital '",
"'change delta is %s'",
"%",
"(",
"target",
",",
"dt",
",",
"capital_change_amount",
")",
")",
"elif",
"capital_change",
"[",
"'type'",
"]",
"==",
"'delta'",
":",
"target",
"=",
"None",
"capital_change_amount",
"=",
"capital_change",
"[",
"'value'",
"]",
"log",
".",
"info",
"(",
"'Processing capital change of delta %s at %s'",
"%",
"(",
"capital_change_amount",
",",
"dt",
")",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Capital change %s does not indicate a valid type \"",
"\"('target' or 'delta')\"",
"%",
"capital_change",
")",
"return",
"self",
".",
"capital_change_deltas",
".",
"update",
"(",
"{",
"dt",
":",
"capital_change_amount",
"}",
")",
"self",
".",
"metrics_tracker",
".",
"capital_change",
"(",
"capital_change_amount",
")",
"yield",
"{",
"'capital_change'",
":",
"{",
"'date'",
":",
"dt",
",",
"'type'",
":",
"'cash'",
",",
"'target'",
":",
"target",
",",
"'delta'",
":",
"capital_change_amount",
"}",
"}"
] | If there is a capital change for a given dt, this means the change
occurs before `handle_data` on the given dt. In the case of the
change being a target value, the change will be computed on the
portfolio value according to prices at the given dt.
`portfolio_value_adjustment`, if specified, will be removed from the
portfolio_value of the cumulative performance when calculating deltas
from target capital changes. | [
"If",
"there",
"is",
"a",
"capital",
"change",
"for",
"a",
"given",
"dt",
"this",
"means",
"the",
"the",
"change",
"occurs",
"before",
"handle_data",
"on",
"the",
"given",
"dt",
".",
"In",
"the",
"case",
"of",
"the",
"change",
"being",
"a",
"target",
"value",
"the",
"change",
"will",
"be",
"computed",
"on",
"the",
"portfolio",
"value",
"according",
"to",
"prices",
"at",
"the",
"given",
"dt"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L675-L725 |
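For reference, the `capital_changes` mapping consumed here looks like the sketch below (the dates and amounts are made up); how the mapping is supplied to the algorithm is outside this excerpt:

```python
import pandas as pd

# Keyed by timestamp; each entry carries a 'type' of 'target' or
# 'delta' plus a 'value', matching the branches above.
capital_changes = {
    pd.Timestamp('2017-03-01', tz='utc'): {'type': 'delta', 'value': 50000.0},
    pd.Timestamp('2017-06-01', tz='utc'): {'type': 'target', 'value': 2e6},
}
```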
26,086 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.get_environment | def get_environment(self, field='platform'):
"""Query the execution environment.
Parameters
----------
field : {'platform', 'arena', 'data_frequency',
'start', 'end', 'capital_base', '*'}
The field to query. The options have the following meanings:
arena : str
The arena from the simulation parameters. This will normally
be ``'backtest'`` but some systems may use this distinguish
live trading from backtesting.
data_frequency : {'daily', 'minute'}
data_frequency tells the algorithm if it is running with
daily data or minute data.
start : datetime
The start date for the simulation.
end : datetime
The end date for the simulation.
capital_base : float
The starting capital for the simulation.
platform : str
The platform that the code is running on. By default this
will be the string 'zipline'. This can allow algorithms to
know if they are running on the Quantopian platform instead.
* : dict[str -> any]
Returns all of the fields in a dictionary.
Returns
-------
val : any
The value for the field queried. See above for more information.
Raises
------
ValueError
Raised when ``field`` is not a valid option.
"""
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
try:
return env[field]
except KeyError:
raise ValueError(
'%r is not a valid field for get_environment' % field,
) | python | def get_environment(self, field='platform'):
"""Query the execution environment.
Parameters
----------
field : {'platform', 'arena', 'data_frequency',
'start', 'end', 'capital_base', '*'}
The field to query. The options have the following meanings:
arena : str
The arena from the simulation parameters. This will normally
be ``'backtest'`` but some systems may use this to distinguish
live trading from backtesting.
data_frequency : {'daily', 'minute'}
data_frequency tells the algorithm if it is running with
daily data or minute data.
start : datetime
The start date for the simulation.
end : datetime
The end date for the simulation.
capital_base : float
The starting capital for the simulation.
platform : str
The platform that the code is running on. By default this
will be the string 'zipline'. This can allow algorithms to
know if they are running on the Quantopian platform instead.
* : dict[str -> any]
Returns all of the fields in a dictionary.
Returns
-------
val : any
The value for the field queried. See above for more information.
Raises
------
ValueError
Raised when ``field`` is not a valid option.
"""
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
try:
return env[field]
except KeyError:
raise ValueError(
'%r is not a valid field for get_environment' % field,
) | [
"def",
"get_environment",
"(",
"self",
",",
"field",
"=",
"'platform'",
")",
":",
"env",
"=",
"{",
"'arena'",
":",
"self",
".",
"sim_params",
".",
"arena",
",",
"'data_frequency'",
":",
"self",
".",
"sim_params",
".",
"data_frequency",
",",
"'start'",
":",
"self",
".",
"sim_params",
".",
"first_open",
",",
"'end'",
":",
"self",
".",
"sim_params",
".",
"last_close",
",",
"'capital_base'",
":",
"self",
".",
"sim_params",
".",
"capital_base",
",",
"'platform'",
":",
"self",
".",
"_platform",
"}",
"if",
"field",
"==",
"'*'",
":",
"return",
"env",
"else",
":",
"try",
":",
"return",
"env",
"[",
"field",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'%r is not a valid field for get_environment'",
"%",
"field",
",",
")"
] | Query the execution environment.
Parameters
----------
field : {'platform', 'arena', 'data_frequency',
'start', 'end', 'capital_base', '*'}
The field to query. The options have the following meanings:
arena : str
The arena from the simulation parameters. This will normally
be ``'backtest'`` but some systems may use this distinguish
live trading from backtesting.
data_frequency : {'daily', 'minute'}
data_frequency tells the algorithm if it is running with
daily data or minute data.
start : datetime
The start date for the simulation.
end : datetime
The end date for the simulation.
capital_base : float
The starting capital for the simulation.
platform : str
The platform that the code is running on. By default this
will be the string 'zipline'. This can allow algorithms to
know if they are running on the Quantopian platform instead.
* : dict[str -> any]
Returns all of the fields in a dictionary.
Returns
-------
val : any
The value for the field queried. See above for more information.
Raises
------
ValueError
Raised when ``field`` is not a valid option. | [
"Query",
"the",
"execution",
"environment",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L728-L782 |
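A short usage sketch from inside an algorithm, querying one field and then the whole environment:

```python
from zipline.api import get_environment

def initialize(context):
    # Branch on the arena, and inspect everything at once with '*'.
    context.is_backtest = get_environment('arena') == 'backtest'
    print(get_environment('data_frequency'))  # 'daily' or 'minute'
    print(get_environment('*'))               # the full environment dict
```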
26,087 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.fetch_csv | def fetch_csv(self,
url,
pre_func=None,
post_func=None,
date_column='date',
date_format=None,
timezone=pytz.utc.zone,
symbol=None,
mask=True,
symbol_column=None,
special_params_checker=None,
country_code=None,
**kwargs):
"""Fetch a csv from a remote url and register the data so that it is
queryable from the ``data`` object.
Parameters
----------
url : str
The url of the csv file to load.
pre_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow preprocessing the raw data returned from
fetch_csv before dates are parsed or symbols are mapped.
post_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow postprocessing of the data after dates and
symbols have been mapped.
date_column : str, optional
The name of the column in the preprocessed dataframe containing
datetime information to map the data.
date_format : str, optional
The format of the dates in the ``date_column``. If not provided
``fetch_csv`` will attempt to infer the format. For information
about the format of this string, see :func:`pandas.read_csv`.
timezone : tzinfo or str, optional
The timezone for the datetime in the ``date_column``.
symbol : str, optional
If the data is about a new asset or index then this string will
be the name used to identify the values in ``data``. For example,
one may use ``fetch_csv`` to load data for VIX, then this field
could be the string ``'VIX'``.
mask : bool, optional
Drop any rows which cannot be symbol mapped.
symbol_column : str
If the data is attaching some new attribute to each asset then this
argument is the name of the column in the preprocessed dataframe
containing the symbols. This will be used along with the date
information to map the sids in the asset finder.
country_code : str, optional
Country code to use to disambiguate symbol lookups.
**kwargs
Forwarded to :func:`pandas.read_csv`.
Returns
-------
csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV
A requests source that will pull data from the url specified.
"""
if country_code is None:
country_code = self.default_fetch_csv_country_code(
self.trading_calendar,
)
# Show all the logs every time fetcher is used.
csv_data_source = PandasRequestsCSV(
url,
pre_func,
post_func,
self.asset_finder,
self.trading_calendar.day,
self.sim_params.start_session,
self.sim_params.end_session,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency=self.data_frequency,
country_code=country_code,
special_params_checker=special_params_checker,
**kwargs
)
# ingest this into dataportal
self.data_portal.handle_extra_source(csv_data_source.df,
self.sim_params)
return csv_data_source | python | def fetch_csv(self,
url,
pre_func=None,
post_func=None,
date_column='date',
date_format=None,
timezone=pytz.utc.zone,
symbol=None,
mask=True,
symbol_column=None,
special_params_checker=None,
country_code=None,
**kwargs):
"""Fetch a csv from a remote url and register the data so that it is
queryable from the ``data`` object.
Parameters
----------
url : str
The url of the csv file to load.
pre_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow preprocessing the raw data returned from
fetch_csv before dates are parsed or symbols are mapped.
post_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow postprocessing of the data after dates and
symbols have been mapped.
date_column : str, optional
The name of the column in the preprocessed dataframe containing
datetime information to map the data.
date_format : str, optional
The format of the dates in the ``date_column``. If not provided
``fetch_csv`` will attempt to infer the format. For information
about the format of this string, see :func:`pandas.read_csv`.
timezone : tzinfo or str, optional
The timezone for the datetime in the ``date_column``.
symbol : str, optional
If the data is about a new asset or index then this string will
be the name used to identify the values in ``data``. For example,
one may use ``fetch_csv`` to load data for VIX, then this field
could be the string ``'VIX'``.
mask : bool, optional
Drop any rows which cannot be symbol mapped.
symbol_column : str
If the data is attaching some new attribute to each asset then this
argument is the name of the column in the preprocessed dataframe
containing the symbols. This will be used along with the date
information to map the sids in the asset finder.
country_code : str, optional
Country code to use to disambiguate symbol lookups.
**kwargs
Forwarded to :func:`pandas.read_csv`.
Returns
-------
csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV
A requests source that will pull data from the url specified.
"""
if country_code is None:
country_code = self.default_fetch_csv_country_code(
self.trading_calendar,
)
# Show all the logs every time fetcher is used.
csv_data_source = PandasRequestsCSV(
url,
pre_func,
post_func,
self.asset_finder,
self.trading_calendar.day,
self.sim_params.start_session,
self.sim_params.end_session,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency=self.data_frequency,
country_code=country_code,
special_params_checker=special_params_checker,
**kwargs
)
# ingest this into dataportal
self.data_portal.handle_extra_source(csv_data_source.df,
self.sim_params)
return csv_data_source | [
"def",
"fetch_csv",
"(",
"self",
",",
"url",
",",
"pre_func",
"=",
"None",
",",
"post_func",
"=",
"None",
",",
"date_column",
"=",
"'date'",
",",
"date_format",
"=",
"None",
",",
"timezone",
"=",
"pytz",
".",
"utc",
".",
"zone",
",",
"symbol",
"=",
"None",
",",
"mask",
"=",
"True",
",",
"symbol_column",
"=",
"None",
",",
"special_params_checker",
"=",
"None",
",",
"country_code",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"country_code",
"is",
"None",
":",
"country_code",
"=",
"self",
".",
"default_fetch_csv_country_code",
"(",
"self",
".",
"trading_calendar",
",",
")",
"# Show all the logs every time fetcher is used.",
"csv_data_source",
"=",
"PandasRequestsCSV",
"(",
"url",
",",
"pre_func",
",",
"post_func",
",",
"self",
".",
"asset_finder",
",",
"self",
".",
"trading_calendar",
".",
"day",
",",
"self",
".",
"sim_params",
".",
"start_session",
",",
"self",
".",
"sim_params",
".",
"end_session",
",",
"date_column",
",",
"date_format",
",",
"timezone",
",",
"symbol",
",",
"mask",
",",
"symbol_column",
",",
"data_frequency",
"=",
"self",
".",
"data_frequency",
",",
"country_code",
"=",
"country_code",
",",
"special_params_checker",
"=",
"special_params_checker",
",",
"*",
"*",
"kwargs",
")",
"# ingest this into dataportal",
"self",
".",
"data_portal",
".",
"handle_extra_source",
"(",
"csv_data_source",
".",
"df",
",",
"self",
".",
"sim_params",
")",
"return",
"csv_data_source"
] | Fetch a csv from a remote url and register the data so that it is
queryable from the ``data`` object.
Parameters
----------
url : str
The url of the csv file to load.
pre_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow preprocessing the raw data returned from
fetch_csv before dates are parsed or symbols are mapped.
post_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow postprocessing of the data after dates and
symbols have been mapped.
date_column : str, optional
The name of the column in the preprocessed dataframe containing
datetime information to map the data.
date_format : str, optional
The format of the dates in the ``date_column``. If not provided
``fetch_csv`` will attempt to infer the format. For information
about the format of this string, see :func:`pandas.read_csv`.
timezone : tzinfo or str, optional
The timezone for the datetime in the ``date_column``.
symbol : str, optional
If the data is about a new asset or index then this string will
be the name used to identify the values in ``data``. For example,
one may use ``fetch_csv`` to load data for VIX, then this field
could be the string ``'VIX'``.
mask : bool, optional
Drop any rows which cannot be symbol mapped.
symbol_column : str
If the data is attaching some new attribute to each asset then this
argument is the name of the column in the preprocessed dataframe
containing the symbols. This will be used along with the date
information to map the sids in the asset finder.
country_code : str, optional
Country code to use to disambiguate symbol lookups.
**kwargs
Forwarded to :func:`pandas.read_csv`.
Returns
-------
csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV
A requests source that will pull data from the url specified. | [
"Fetch",
"a",
"csv",
"from",
"a",
"remote",
"url",
"and",
"register",
"the",
"data",
"so",
"that",
"it",
"is",
"queryable",
"from",
"the",
"data",
"object",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L785-L872 |
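A hedged usage sketch: the URL and column name below are placeholders, and `fetch_csv` is typically called from `initialize` so the source is registered before the simulation starts:

```python
from zipline.api import fetch_csv

def initialize(context):
    fetch_csv(
        'https://example.com/vix.csv',  # placeholder URL
        date_column='Date',             # column holding the datestamps
        symbol='VIX',                   # name the series is registered under
    )
```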
26,088 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.add_event | def add_event(self, rule, callback):
"""Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
) | python | def add_event(self, rule, callback):
"""Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
) | [
"def",
"add_event",
"(",
"self",
",",
"rule",
",",
"callback",
")",
":",
"self",
".",
"event_manager",
".",
"add_event",
"(",
"zipline",
".",
"utils",
".",
"events",
".",
"Event",
"(",
"rule",
",",
"callback",
")",
",",
")"
] | Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered. | [
"Adds",
"an",
"event",
"to",
"the",
"algorithm",
"s",
"EventManager",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L874-L886 |
26,089 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.schedule_function | def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
"""Schedules a function to be called according to some timed rules.
Parameters
----------
func : callable[(context, data) -> None]
The function to execute when the rule is triggered.
date_rule : EventRule, optional
The rule for the dates to execute this function.
time_rule : EventRule, optional
The rule for the times to execute this function.
half_days : bool, optional
Should this rule fire on half days?
calendar : Sentinel, optional
Calendar used to reconcile date and time rules.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules`
"""
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
warnings.warn('Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule', stacklevel=3)
date_rule = date_rule or date_rules.every_day()
time_rule = ((time_rule or time_rules.every_minute())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
time_rules.every_minute())
# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
cal = get_calendar('XNYS')
elif calendar is calendars.US_FUTURES:
cal = get_calendar('us_futures')
else:
raise ScheduleFunctionInvalidCalendar(
given_calendar=calendar,
allowed_calendars=(
'[calendars.US_EQUITIES, calendars.US_FUTURES]'
),
)
self.add_event(
make_eventrule(date_rule, time_rule, cal, half_days),
func,
) | python | def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
"""Schedules a function to be called according to some timed rules.
Parameters
----------
func : callable[(context, data) -> None]
The function to execute when the rule is triggered.
date_rule : EventRule, optional
The rule for the dates to execute this function.
time_rule : EventRule, optional
The rule for the times to execute this function.
half_days : bool, optional
Should this rule fire on half days?
calendar : Sentinel, optional
Calendar used to reconcile date and time rules.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules`
"""
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
warnings.warn('Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule', stacklevel=3)
date_rule = date_rule or date_rules.every_day()
time_rule = ((time_rule or time_rules.every_minute())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
time_rules.every_minute())
# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
cal = get_calendar('XNYS')
elif calendar is calendars.US_FUTURES:
cal = get_calendar('us_futures')
else:
raise ScheduleFunctionInvalidCalendar(
given_calendar=calendar,
allowed_calendars=(
'[calendars.US_EQUITIES, calendars.US_FUTURES]'
),
)
self.add_event(
make_eventrule(date_rule, time_rule, cal, half_days),
func,
) | [
"def",
"schedule_function",
"(",
"self",
",",
"func",
",",
"date_rule",
"=",
"None",
",",
"time_rule",
"=",
"None",
",",
"half_days",
"=",
"True",
",",
"calendar",
"=",
"None",
")",
":",
"# When the user calls schedule_function(func, <time_rule>), assume that",
"# the user meant to specify a time rule but no date rule, instead of",
"# a date rule and no time rule as the signature suggests",
"if",
"isinstance",
"(",
"date_rule",
",",
"(",
"AfterOpen",
",",
"BeforeClose",
")",
")",
"and",
"not",
"time_rule",
":",
"warnings",
".",
"warn",
"(",
"'Got a time rule for the second positional argument '",
"'date_rule. You should use keyword argument '",
"'time_rule= when calling schedule_function without '",
"'specifying a date_rule'",
",",
"stacklevel",
"=",
"3",
")",
"date_rule",
"=",
"date_rule",
"or",
"date_rules",
".",
"every_day",
"(",
")",
"time_rule",
"=",
"(",
"(",
"time_rule",
"or",
"time_rules",
".",
"every_minute",
"(",
")",
")",
"if",
"self",
".",
"sim_params",
".",
"data_frequency",
"==",
"'minute'",
"else",
"# If we are in daily mode the time_rule is ignored.",
"time_rules",
".",
"every_minute",
"(",
")",
")",
"# Check the type of the algorithm's schedule before pulling calendar",
"# Note that the ExchangeTradingSchedule is currently the only",
"# TradingSchedule class, so this is unlikely to be hit",
"if",
"calendar",
"is",
"None",
":",
"cal",
"=",
"self",
".",
"trading_calendar",
"elif",
"calendar",
"is",
"calendars",
".",
"US_EQUITIES",
":",
"cal",
"=",
"get_calendar",
"(",
"'XNYS'",
")",
"elif",
"calendar",
"is",
"calendars",
".",
"US_FUTURES",
":",
"cal",
"=",
"get_calendar",
"(",
"'us_futures'",
")",
"else",
":",
"raise",
"ScheduleFunctionInvalidCalendar",
"(",
"given_calendar",
"=",
"calendar",
",",
"allowed_calendars",
"=",
"(",
"'[calendars.US_EQUITIES, calendars.US_FUTURES]'",
")",
",",
")",
"self",
".",
"add_event",
"(",
"make_eventrule",
"(",
"date_rule",
",",
"time_rule",
",",
"cal",
",",
"half_days",
")",
",",
"func",
",",
")"
] | Schedules a function to be called according to some timed rules.
Parameters
----------
func : callable[(context, data) -> None]
The function to execute when the rule is triggered.
date_rule : EventRule, optional
The rule for the dates to execute this function.
time_rule : EventRule, optional
The rule for the times to execute this function.
half_days : bool, optional
Should this rule fire on half days?
calendar : Sentinel, optional
Calendar used to reconcile date and time rules.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules` | [
"Schedules",
"a",
"function",
"to",
"be",
"called",
"according",
"to",
"some",
"timed",
"rules",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L889-L951 |
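A sketch of typical usage from `initialize`; the `rebalance` callback and its schedule are illustrative.

```python
from zipline.api import date_rules, schedule_function, time_rules

def rebalance(context, data):
    # Trading logic that should run on the schedule below.
    pass

def initialize(context):
    # Run `rebalance` at the start of each week, 30 minutes after
    # the open, including half days.
    schedule_function(
        rebalance,
        date_rule=date_rules.week_start(),
        time_rule=time_rules.market_open(minutes=30),
        half_days=True,
    )
```

Note the warning path in the function above: an `AfterOpen` or `BeforeClose` rule passed positionally as `date_rule` is accepted but reinterpreted, so time rules should always be passed by keyword.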
26,090 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.continuous_future | def continuous_future(self,
root_symbol_str,
offset=0,
roll='volume',
adjustment='mul'):
"""Create a specifier for a continuous contract.
Parameters
----------
root_symbol_str : str
The root symbol for the future chain.
offset : int, optional
The distance from the primary contract. Default is 0.
roll : str, optional
How rolls are determined. Default is 'volume'.
adjustment : str, optional
Method for adjusting lookback prices between rolls. Options are
'mul', 'add', and None. Default is 'mul'.
Returns
-------
continuous_future : ContinuousFuture
The continuous future specifier.
"""
return self.asset_finder.create_continuous_future(
root_symbol_str,
offset,
roll,
adjustment,
) | python | def continuous_future(self,
root_symbol_str,
offset=0,
roll='volume',
adjustment='mul'):
"""Create a specifier for a continuous contract.
Parameters
----------
root_symbol_str : str
The root symbol for the future chain.
offset : int, optional
The distance from the primary contract. Default is 0.
roll : str, optional
How rolls are determined. Default is 'volume'.
adjustment : str, optional
Method for adjusting lookback prices between rolls. Options are
'mul', 'add', and None. Default is 'mul'.
Returns
-------
continuous_future : ContinuousFuture
The continuous future specifier.
"""
return self.asset_finder.create_continuous_future(
root_symbol_str,
offset,
roll,
adjustment,
) | [
"def",
"continuous_future",
"(",
"self",
",",
"root_symbol_str",
",",
"offset",
"=",
"0",
",",
"roll",
"=",
"'volume'",
",",
"adjustment",
"=",
"'mul'",
")",
":",
"return",
"self",
".",
"asset_finder",
".",
"create_continuous_future",
"(",
"root_symbol_str",
",",
"offset",
",",
"roll",
",",
"adjustment",
",",
")"
] | Create a specifier for a continuous contract.
Parameters
----------
root_symbol_str : str
The root symbol for the future chain.
offset : int, optional
The distance from the primary contract. Default is 0.
roll : str, optional
How rolls are determined. Default is 'volume'.
adjustment : str, optional
Method for adjusting lookback prices between rolls. Options are
'mul', 'add', and None. Default is 'mul'.
Returns
-------
continuous_future : ContinuousFuture
The continuous future specifier. | [
"Create",
"a",
"specifier",
"for",
"a",
"continuous",
"contract",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1000-L1032 |
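A sketch assuming a futures bundle that contains a root symbol 'CL' (the symbol is illustrative, not from the source):

```python
from zipline.api import continuous_future

def initialize(context):
    # Front contract (offset=0) of the 'CL' chain, rolled by volume,
    # with multiplicative back-adjustment of historical prices.
    context.crude = continuous_future('CL', offset=0, roll='volume',
                                      adjustment='mul')
```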
26,091 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.symbol | def symbol(self, symbol_str, country_code=None):
"""Lookup an Equity by its ticker symbol.
Parameters
----------
symbol_str : str
The ticker symbol for the equity to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equity : Equity
The equity that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when the symbol was not held on the current lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
# If the user has not set the symbol lookup date,
# use the end_session as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date \
if self._symbol_lookup_date is not None \
else self.sim_params.end_session
return self.asset_finder.lookup_symbol(
symbol_str,
as_of_date=_lookup_date,
country_code=country_code,
) | python | def symbol(self, symbol_str, country_code=None):
"""Lookup an Equity by its ticker symbol.
Parameters
----------
symbol_str : str
The ticker symbol for the equity to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equity : Equity
The equity that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when the symbol was not held on the current lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
# If the user has not set the symbol lookup date,
# use the end_session as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date \
if self._symbol_lookup_date is not None \
else self.sim_params.end_session
return self.asset_finder.lookup_symbol(
symbol_str,
as_of_date=_lookup_date,
country_code=country_code,
) | [
"def",
"symbol",
"(",
"self",
",",
"symbol_str",
",",
"country_code",
"=",
"None",
")",
":",
"# If the user has not set the symbol lookup date,",
"# use the end_session as the date for symbol->sid resolution.",
"_lookup_date",
"=",
"self",
".",
"_symbol_lookup_date",
"if",
"self",
".",
"_symbol_lookup_date",
"is",
"not",
"None",
"else",
"self",
".",
"sim_params",
".",
"end_session",
"return",
"self",
".",
"asset_finder",
".",
"lookup_symbol",
"(",
"symbol_str",
",",
"as_of_date",
"=",
"_lookup_date",
",",
"country_code",
"=",
"country_code",
",",
")"
] | Lookup an Equity by its ticker symbol.
Parameters
----------
symbol_str : str
The ticker symbol for the equity to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equity : Equity
The equity that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when the symbol was not held on the current lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date` | [
"Lookup",
"an",
"Equity",
"by",
"its",
"ticker",
"symbol",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1039-L1074 |
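A sketch of the lookup-date interaction described above; the ticker and date are illustrative:

```python
import pandas as pd
from zipline.api import set_symbol_lookup_date, symbol

def initialize(context):
    # Pin symbol->sid resolution so reused tickers resolve
    # deterministically; without this, end_session is used.
    set_symbol_lookup_date(pd.Timestamp('2015-01-02', tz='UTC'))
    context.asset = symbol('AAPL')
```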
26,092 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.symbols | def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args] | python | def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args] | [
"def",
"symbols",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"[",
"self",
".",
"symbol",
"(",
"identifier",
",",
"*",
"*",
"kwargs",
")",
"for",
"identifier",
"in",
"args",
"]"
] | Lookup multiple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date` | [
"Lookup",
"multuple",
"Equities",
"as",
"a",
"list",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1077-L1105 |
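Because every keyword argument is forwarded to `symbol`, a single `country_code` applies to all of the tickers. A sketch with illustrative tickers:

```python
from zipline.api import symbols

def initialize(context):
    # One lookup call for the whole (hypothetical) universe.
    context.universe = symbols('AAPL', 'MSFT', 'XOM', country_code='US')
```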
26,093 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.validate_order_params | def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.portfolio,
self.get_datetime(),
self.trading_client.current_data) | python | def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.portfolio,
self.get_datetime(),
self.trading_client.current_data) | [
"def",
"validate_order_params",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"limit_price",
",",
"stop_price",
",",
"style",
")",
":",
"if",
"not",
"self",
".",
"initialized",
":",
"raise",
"OrderDuringInitialize",
"(",
"msg",
"=",
"\"order() can only be called from within handle_data()\"",
")",
"if",
"style",
":",
"if",
"limit_price",
":",
"raise",
"UnsupportedOrderParameters",
"(",
"msg",
"=",
"\"Passing both limit_price and style is not supported.\"",
")",
"if",
"stop_price",
":",
"raise",
"UnsupportedOrderParameters",
"(",
"msg",
"=",
"\"Passing both stop_price and style is not supported.\"",
")",
"for",
"control",
"in",
"self",
".",
"trading_controls",
":",
"control",
".",
"validate",
"(",
"asset",
",",
"amount",
",",
"self",
".",
"portfolio",
",",
"self",
".",
"get_datetime",
"(",
")",
",",
"self",
".",
"trading_client",
".",
"current_data",
")"
] | Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found. | [
"Helper",
"method",
"for",
"validating",
"parameters",
"to",
"the",
"order",
"API",
"function",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1302-L1335 |
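The mutual-exclusion rule above means an order may specify prices or a style, but not both. A sketch of calls from `handle_data`, with `asset` assumed to be an Equity looked up elsewhere:

```python
from zipline.api import order
from zipline.finance.execution import LimitOrder

def handle_data(context, data):
    asset = context.asset  # assumed set in initialize
    order(asset, 100, limit_price=10.0)        # ok: implicit limit order
    order(asset, 100, style=LimitOrder(10.0))  # ok: explicit style
    # Raises UnsupportedOrderParameters: price and style together.
    # order(asset, 100, limit_price=10.0, style=LimitOrder(10.0))
```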
26,094 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.__convert_order_params_for_blotter | def __convert_order_params_for_blotter(asset,
limit_price,
stop_price,
style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price, asset=asset)
if limit_price:
return LimitOrder(limit_price, asset=asset)
if stop_price:
return StopOrder(stop_price, asset=asset)
else:
return MarketOrder() | python | def __convert_order_params_for_blotter(asset,
limit_price,
stop_price,
style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price, asset=asset)
if limit_price:
return LimitOrder(limit_price, asset=asset)
if stop_price:
return StopOrder(stop_price, asset=asset)
else:
return MarketOrder() | [
"def",
"__convert_order_params_for_blotter",
"(",
"asset",
",",
"limit_price",
",",
"stop_price",
",",
"style",
")",
":",
"if",
"style",
":",
"assert",
"(",
"limit_price",
",",
"stop_price",
")",
"==",
"(",
"None",
",",
"None",
")",
"return",
"style",
"if",
"limit_price",
"and",
"stop_price",
":",
"return",
"StopLimitOrder",
"(",
"limit_price",
",",
"stop_price",
",",
"asset",
"=",
"asset",
")",
"if",
"limit_price",
":",
"return",
"LimitOrder",
"(",
"limit_price",
",",
"asset",
"=",
"asset",
")",
"if",
"stop_price",
":",
"return",
"StopOrder",
"(",
"stop_price",
",",
"asset",
"=",
"asset",
")",
"else",
":",
"return",
"MarketOrder",
"(",
")"
] | Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None). | [
"Helper",
"method",
"for",
"converting",
"deprecated",
"limit_price",
"and",
"stop_price",
"arguments",
"into",
"ExecutionStyle",
"instances",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1338-L1359 |
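The precedence implied by the helper, restated as a sketch: an explicit style wins, then the price combination selects the execution style class.

```python
# style given                -> style (both prices must be None)
# limit_price and stop_price -> StopLimitOrder(limit, stop, asset=asset)
# limit_price only           -> LimitOrder(limit, asset=asset)
# stop_price only            -> StopOrder(stop, asset=asset)
# neither                    -> MarketOrder()
```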
26,095 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.order_value | def order_value(self,
asset,
value,
limit_price=None,
stop_price=None,
style=None):
"""Place an order by desired value rather than desired number of
shares.
Parameters
----------
asset : Asset
The asset that this order is for.
value : float
If the requested asset exists, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_percent`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_value_amount(asset, value)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | python | def order_value(self,
asset,
value,
limit_price=None,
stop_price=None,
style=None):
"""Place an order by desired value rather than desired number of
shares.
Parameters
----------
asset : Asset
The asset that this order is for.
value : float
If the requested asset exists, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_percent`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_value_amount(asset, value)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style) | [
"def",
"order_value",
"(",
"self",
",",
"asset",
",",
"value",
",",
"limit_price",
"=",
"None",
",",
"stop_price",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_can_order_asset",
"(",
"asset",
")",
":",
"return",
"None",
"amount",
"=",
"self",
".",
"_calculate_order_value_amount",
"(",
"asset",
",",
"value",
")",
"return",
"self",
".",
"order",
"(",
"asset",
",",
"amount",
",",
"limit_price",
"=",
"limit_price",
",",
"stop_price",
"=",
"stop_price",
",",
"style",
"=",
"style",
")"
] | Place an order by desired value rather than desired number of
shares.
Parameters
----------
asset : Asset
The asset that this order is for.
value : float
If the requested asset exists, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_percent` | [
"Place",
"an",
"order",
"by",
"desired",
"value",
"rather",
"than",
"desired",
"number",
"of",
"shares",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1363-L1414 |
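A sketch: target a fixed dollar exposure and let the share count fall out of the current price (the ticker is illustrative):

```python
from zipline.api import order_value, symbol

def handle_data(context, data):
    # Buy roughly $10,000 of the asset; the implied share count is
    # value / current price, so it varies bar to bar.
    order_value(symbol('AAPL'), 10000)
    # A negative value sells/shorts the equivalent exposure:
    # order_value(symbol('AAPL'), -10000)
```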
26,096 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm._sync_last_sale_prices | def _sync_last_sale_prices(self, dt=None):
"""Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap.
"""
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt | python | def _sync_last_sale_prices(self, dt=None):
"""Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap.
"""
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt | [
"def",
"_sync_last_sale_prices",
"(",
"self",
",",
"dt",
"=",
"None",
")",
":",
"if",
"dt",
"is",
"None",
":",
"dt",
"=",
"self",
".",
"datetime",
"if",
"dt",
"!=",
"self",
".",
"_last_sync_time",
":",
"self",
".",
"metrics_tracker",
".",
"sync_last_sale_prices",
"(",
"dt",
",",
"self",
".",
"data_portal",
",",
")",
"self",
".",
"_last_sync_time",
"=",
"dt"
] | Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap. | [
"Sync",
"the",
"last",
"sale",
"prices",
"on",
"the",
"metrics",
"tracker",
"to",
"a",
"given",
"datetime",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1420-L1442 |
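The caching here is a simple memoize-on-timestamp; a standalone restatement of the pattern, with hypothetical names:

```python
def once_per_dt(sync):
    """Wrap `sync` so it runs at most once per distinct timestamp."""
    last = [None]
    def maybe_sync(dt):
        if dt != last[0]:
            sync(dt)      # the expensive call, e.g. the price sync
            last[0] = dt
    return maybe_sync
```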
26,097 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.on_dt_changed | def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
self.datetime = dt
self.blotter.set_date(dt) | python | def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
self.datetime = dt
self.blotter.set_date(dt) | [
"def",
"on_dt_changed",
"(",
"self",
",",
"dt",
")",
":",
"self",
".",
"datetime",
"=",
"dt",
"self",
".",
"blotter",
".",
"set_date",
"(",
"dt",
")"
] | Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here. | [
"Callback",
"triggered",
"by",
"the",
"simulation",
"loop",
"whenever",
"the",
"current",
"dt",
"changes",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1457-L1466 |
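A hypothetical sketch of the hook being driven; user code normally never calls this, the simulation clock does:

```python
import pandas as pd

# After this call, get_datetime() returns the new bar's timestamp and
# orders routed through the blotter are stamped with it. `algo` is an
# assumed TradingAlgorithm instance.
algo.on_dt_changed(pd.Timestamp('2014-01-02 14:31', tz='UTC'))
```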
26,098 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.get_datetime | def get_datetime(self, tz=None):
"""
Returns the current simulation datetime.
Parameters
----------
tz : tzinfo or str, optional
The timezone to return the datetime in. This defaults to utc.
Returns
-------
dt : datetime
The current simulation datetime converted to ``tz``.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
dt = dt.astimezone(tz)
return dt | python | def get_datetime(self, tz=None):
"""
Returns the current simulation datetime.
Parameters
----------
tz : tzinfo or str, optional
The timezone to return the datetime in. This defaults to utc.
Returns
-------
dt : datetime
The current simulation datetime converted to ``tz``.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
dt = dt.astimezone(tz)
return dt | [
"def",
"get_datetime",
"(",
"self",
",",
"tz",
"=",
"None",
")",
":",
"dt",
"=",
"self",
".",
"datetime",
"assert",
"dt",
".",
"tzinfo",
"==",
"pytz",
".",
"utc",
",",
"\"Algorithm should have a utc datetime\"",
"if",
"tz",
"is",
"not",
"None",
":",
"dt",
"=",
"dt",
".",
"astimezone",
"(",
"tz",
")",
"return",
"dt"
] | Returns the current simulation datetime.
Parameters
----------
tz : tzinfo or str, optional
The timezone to return the datetime in. This defaults to utc.
Returns
-------
dt : datetime
The current simulation datetime converted to ``tz``. | [
"Returns",
"the",
"current",
"simulation",
"datetime",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1471-L1489 |
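A sketch of UTC and converted access; per the code above, `tz` must be something `astimezone` accepts, so a tzinfo object is the safe choice:

```python
import pytz
from zipline.api import get_datetime

def handle_data(context, data):
    now_utc = get_datetime()                                 # always UTC
    now_nyc = get_datetime(tz=pytz.timezone('US/Eastern'))   # converted
```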
26,099 | quantopian/zipline | zipline/algorithm.py | TradingAlgorithm.set_slippage | def set_slippage(self, us_equities=None, us_futures=None):
"""Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel`
"""
if self.initialized:
raise SetSlippagePostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.slippage_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.slippage_models[Future] = us_futures | python | def set_slippage(self, us_equities=None, us_futures=None):
"""Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel`
"""
if self.initialized:
raise SetSlippagePostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.slippage_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.slippage_models[Future] = us_futures | [
"def",
"set_slippage",
"(",
"self",
",",
"us_equities",
"=",
"None",
",",
"us_futures",
"=",
"None",
")",
":",
"if",
"self",
".",
"initialized",
":",
"raise",
"SetSlippagePostInit",
"(",
")",
"if",
"us_equities",
"is",
"not",
"None",
":",
"if",
"Equity",
"not",
"in",
"us_equities",
".",
"allowed_asset_types",
":",
"raise",
"IncompatibleSlippageModel",
"(",
"asset_type",
"=",
"'equities'",
",",
"given_model",
"=",
"us_equities",
",",
"supported_asset_types",
"=",
"us_equities",
".",
"allowed_asset_types",
",",
")",
"self",
".",
"blotter",
".",
"slippage_models",
"[",
"Equity",
"]",
"=",
"us_equities",
"if",
"us_futures",
"is",
"not",
"None",
":",
"if",
"Future",
"not",
"in",
"us_futures",
".",
"allowed_asset_types",
":",
"raise",
"IncompatibleSlippageModel",
"(",
"asset_type",
"=",
"'futures'",
",",
"given_model",
"=",
"us_futures",
",",
"supported_asset_types",
"=",
"us_futures",
".",
"allowed_asset_types",
",",
")",
"self",
".",
"blotter",
".",
"slippage_models",
"[",
"Future",
"]",
"=",
"us_futures"
] | Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel` | [
"Set",
"the",
"slippage",
"models",
"for",
"the",
"simulation",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1492-L1525 |
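A sketch of per-asset-class configuration from `initialize`; the specific models and parameters are illustrative, and calling this after initialization raises `SetSlippagePostInit`:

```python
from zipline.api import set_slippage
from zipline.finance import slippage

def initialize(context):
    set_slippage(
        us_equities=slippage.VolumeShareSlippage(volume_limit=0.025,
                                                 price_impact=0.1),
        us_futures=slippage.VolatilityVolumeShare(volume_limit=0.05),
    )
```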