id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
25,800 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarWriter.truncate | def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir) | python | def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir) | [
"def",
"truncate",
"(",
"self",
",",
"date",
")",
":",
"truncate_slice_end",
"=",
"self",
".",
"data_len_for_day",
"(",
"date",
")",
"glob_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_rootdir",
",",
"\"*\"",
",",
"\"*\"",
",",
"\"*.bcolz\"",
")",
"sid_paths",
"=",
"sorted",
"(",
"glob",
"(",
"glob_path",
")",
")",
"for",
"sid_path",
"in",
"sid_paths",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sid_path",
")",
"try",
":",
"table",
"=",
"bcolz",
".",
"open",
"(",
"rootdir",
"=",
"sid_path",
")",
"except",
"IOError",
":",
"continue",
"if",
"table",
".",
"len",
"<=",
"truncate_slice_end",
":",
"logger",
".",
"info",
"(",
"\"{0} not past truncate date={1}.\"",
",",
"file_name",
",",
"date",
")",
"continue",
"logger",
".",
"info",
"(",
"\"Truncating {0} at end_date={1}\"",
",",
"file_name",
",",
"date",
".",
"date",
"(",
")",
")",
"table",
".",
"resize",
"(",
"truncate_slice_end",
")",
"# Update end session in metadata.",
"metadata",
"=",
"BcolzMinuteBarMetadata",
".",
"read",
"(",
"self",
".",
"_rootdir",
")",
"metadata",
".",
"end_session",
"=",
"date",
"metadata",
".",
"write",
"(",
"self",
".",
"_rootdir",
")"
] | Truncate data beyond this date in all ctables. | [
"Truncate",
"data",
"beyond",
"this",
"date",
"in",
"all",
"ctables",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L856-L883 |
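For orientation, the per-sid truncation above boils down to an in-place `resize` on a bcolz ctable. A minimal sketch of that pattern — not zipline's method itself — where the `example_sid.bcolz` path, column name, and 6-row cutoff are made up for illustration, and bcolz (now unmaintained) is assumed installed:

```python
import numpy as np
import bcolz  # unmaintained; used here only to illustrate the resize call

rootdir = 'example_sid.bcolz'  # hypothetical per-sid ctable directory
ct = bcolz.ctable(columns=[np.arange(10, dtype=np.uint32)],
                  names=['close'], rootdir=rootdir, mode='w')
ct.flush()

table = bcolz.open(rootdir=rootdir)
truncate_slice_end = 6  # stands in for self.data_len_for_day(date)
if table.len > truncate_slice_end:
    table.resize(truncate_slice_end)  # shrinks every column in place
print(table.len)  # 6
```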
25,801 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarReader._minutes_to_exclude | def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of (market_open, market_close) Timestamp pairs, one per day with
an early close, from which the minutes to exclude are derived.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes | python | def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of (market_open, market_close) Timestamp pairs, one per day with
an early close, from which the minutes to exclude are derived.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes | [
"def",
"_minutes_to_exclude",
"(",
"self",
")",
":",
"market_opens",
"=",
"self",
".",
"_market_opens",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
"market_closes",
"=",
"self",
".",
"_market_closes",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
"minutes_per_day",
"=",
"(",
"market_closes",
"-",
"market_opens",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"early_indices",
"=",
"np",
".",
"where",
"(",
"minutes_per_day",
"!=",
"self",
".",
"_minutes_per_day",
"-",
"1",
")",
"[",
"0",
"]",
"early_opens",
"=",
"self",
".",
"_market_opens",
"[",
"early_indices",
"]",
"early_closes",
"=",
"self",
".",
"_market_closes",
"[",
"early_indices",
"]",
"minutes",
"=",
"[",
"(",
"market_open",
",",
"early_close",
")",
"for",
"market_open",
",",
"early_close",
"in",
"zip",
"(",
"early_opens",
",",
"early_closes",
")",
"]",
"return",
"minutes"
] | Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of (market_open, market_close) Timestamp pairs, one per day with
an early close, from which the minutes to exclude are derived. | [
"Calculate",
"the",
"minutes",
"which",
"should",
"be",
"excluded",
"when",
"a",
"window",
"occurs",
"on",
"days",
"which",
"had",
"an",
"early",
"close",
"i",
".",
"e",
".",
"days",
"where",
"the",
"close",
"based",
"on",
"the",
"regular",
"period",
"of",
"minutes",
"per",
"day",
"and",
"the",
"market",
"close",
"do",
"not",
"match",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L991-L1013 |
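The early-close test in `_minutes_to_exclude` is plain `datetime64` arithmetic. A self-contained sketch with made-up UTC session bounds (a regular 9:31–16:00 Eastern session spans `minutes_per_day - 1 = 389` open-to-close minutes, so any other length is flagged):

```python
import numpy as np
import pandas as pd

minutes_per_day = 390  # assumed regular US equity session

market_opens = pd.DatetimeIndex(
    ['2016-11-23 14:31', '2016-11-25 14:31', '2016-11-28 14:31']
).values.astype('datetime64[m]')
market_closes = pd.DatetimeIndex(
    ['2016-11-23 21:00', '2016-11-25 18:00', '2016-11-28 21:00']
).values.astype('datetime64[m]')

session_lengths = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(session_lengths != minutes_per_day - 1)[0]
print(early_indices)  # [1] -> only the half session (2016-11-25) is flagged
```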
25,802 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarReader.get_value | def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value | python | def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value | [
"def",
"get_value",
"(",
"self",
",",
"sid",
",",
"dt",
",",
"field",
")",
":",
"if",
"self",
".",
"_last_get_value_dt_value",
"==",
"dt",
".",
"value",
":",
"minute_pos",
"=",
"self",
".",
"_last_get_value_dt_position",
"else",
":",
"try",
":",
"minute_pos",
"=",
"self",
".",
"_find_position_of_minute",
"(",
"dt",
")",
"except",
"ValueError",
":",
"raise",
"NoDataOnDate",
"(",
")",
"self",
".",
"_last_get_value_dt_value",
"=",
"dt",
".",
"value",
"self",
".",
"_last_get_value_dt_position",
"=",
"minute_pos",
"try",
":",
"value",
"=",
"self",
".",
"_open_minute_file",
"(",
"field",
",",
"sid",
")",
"[",
"minute_pos",
"]",
"except",
"IndexError",
":",
"value",
"=",
"0",
"if",
"value",
"==",
"0",
":",
"if",
"field",
"==",
"'volume'",
":",
"return",
"0",
"else",
":",
"return",
"np",
".",
"nan",
"if",
"field",
"!=",
"'volume'",
":",
"value",
"*=",
"self",
".",
"_ohlc_ratio_inverse_for_sid",
"(",
"sid",
")",
"return",
"value"
] | Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.) | [
"Retrieve",
"the",
"pricing",
"info",
"for",
"the",
"given",
"sid",
"dt",
"and",
"field",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1098-L1149 |
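The zero-means-missing convention and the OHLC scaling in `get_value` can be shown in isolation. A sketch assuming a ratio of 1000, i.e. prices stored as unsigned thousandths of a dollar (the real reader looks the ratio up per sid via `_ohlc_ratio_inverse_for_sid`):

```python
import numpy as np

OHLC_RATIO = 1000  # assumed: prices stored as uint32 thousandths

def decode(raw, field):
    # A stored zero means "no trade this minute".
    if raw == 0:
        return 0 if field == 'volume' else np.nan
    # Volume passes through; prices are scaled back to floats.
    return raw if field == 'volume' else raw * (1.0 / OHLC_RATIO)

print(decode(102340, 'close'))  # 102.34 (up to float rounding)
print(decode(0, 'close'))       # nan
print(decode(0, 'volume'))      # 0
```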
25,803 | quantopian/zipline | zipline/data/minute_bars.py | BcolzMinuteBarReader._find_position_of_minute | def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
) | python | def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
) | [
"def",
"_find_position_of_minute",
"(",
"self",
",",
"minute_dt",
")",
":",
"return",
"find_position_of_minute",
"(",
"self",
".",
"_market_open_values",
",",
"self",
".",
"_market_close_values",
",",
"minute_dt",
".",
"value",
"/",
"NANOS_IN_MINUTE",
",",
"self",
".",
"_minutes_per_day",
",",
"False",
",",
")"
] | Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day. | [
"Internal",
"method",
"that",
"returns",
"the",
"position",
"of",
"the",
"given",
"minute",
"in",
"the",
"list",
"of",
"every",
"trading",
"minute",
"since",
"market",
"open",
"of",
"the",
"first",
"trading",
"day",
".",
"Adjusts",
"non",
"market",
"minutes",
"to",
"the",
"last",
"close",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1203-L1228 |
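Setting aside the early-close and non-market-minute handling the real `find_position_of_minute` adds, the position arithmetic is: session index times minutes-per-day, plus the offset from that session's open. A sketch with two made-up sessions (9:31 Eastern expressed in UTC):

```python
import pandas as pd

NANOS_IN_MINUTE = 60 * 1_000_000_000
minutes_per_day = 390

opens = pd.DatetimeIndex(
    ['2002-01-02 14:31', '2002-01-03 14:31'], tz='UTC'
).asi8 // NANOS_IN_MINUTE  # session opens as minute-epoch integers

def position_of_minute(dt):
    minute = dt.value // NANOS_IN_MINUTE
    day = opens.searchsorted(minute, side='right') - 1  # containing session
    return day * minutes_per_day + (minute - opens[day])

print(position_of_minute(pd.Timestamp('2002-01-02 14:32', tz='UTC')))  # 1
print(position_of_minute(pd.Timestamp('2002-01-03 14:31', tz='UTC')))  # 390
```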
25,804 | quantopian/zipline | zipline/data/minute_bars.py | H5MinuteBarUpdateWriter.write | def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0) | python | def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0) | [
"def",
"write",
"(",
"self",
",",
"frames",
")",
":",
"with",
"HDFStore",
"(",
"self",
".",
"_path",
",",
"'w'",
",",
"complevel",
"=",
"self",
".",
"_complevel",
",",
"complib",
"=",
"self",
".",
"_complib",
")",
"as",
"store",
":",
"panel",
"=",
"pd",
".",
"Panel",
".",
"from_dict",
"(",
"dict",
"(",
"frames",
")",
")",
"panel",
".",
"to_hdf",
"(",
"store",
",",
"'updates'",
")",
"with",
"tables",
".",
"open_file",
"(",
"self",
".",
"_path",
",",
"mode",
"=",
"'r+'",
")",
"as",
"h5file",
":",
"h5file",
".",
"set_node_attr",
"(",
"'/'",
",",
"'version'",
",",
"0",
")"
] | Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data. | [
"Write",
"the",
"frames",
"to",
"the",
"target",
"HDF5",
"file",
"using",
"the",
"format",
"used",
"by",
"pd",
".",
"Panel",
".",
"to_hdf"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1346-L1363 |
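Note that `pd.Panel` was removed in pandas 1.0, so on current pandas the same sid-to-OHLCV mapping is usually persisted as one frame with a (sid, minute) MultiIndex. A sketch under that assumption (`updates.h5` is a made-up path; PyTables must be installed):

```python
import pandas as pd

index = pd.date_range('2016-01-05 14:31', periods=3, freq='min', tz='UTC')
frames = {
    1: pd.DataFrame({'close': [10.0, 10.1, 10.2],
                     'volume': [100, 0, 50]}, index=index),
    2: pd.DataFrame({'close': [20.0, 20.1, 20.2],
                     'volume': [5, 5, 5]}, index=index),
}

# pd.concat with dict keys yields a (sid, minute) MultiIndex, replacing Panel.
combined = pd.concat(frames, names=['sid', 'dt'])
combined.to_hdf('updates.h5', key='updates', mode='w',
                complevel=5, complib='zlib')

print(pd.read_hdf('updates.h5', 'updates').loc[1])  # frame for sid 1
```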
25,805 | quantopian/zipline | zipline/pipeline/loaders/utils.py | next_event_indexer | def next_event_indexer(all_dates,
data_query_cutoff,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right')
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out | python | def next_event_indexer(all_dates,
data_query_cutoff,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right')
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out | [
"def",
"next_event_indexer",
"(",
"all_dates",
",",
"data_query_cutoff",
",",
"all_sids",
",",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
":",
"validate_event_metadata",
"(",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
"out",
"=",
"np",
".",
"full",
"(",
"(",
"len",
"(",
"all_dates",
")",
",",
"len",
"(",
"all_sids",
")",
")",
",",
"-",
"1",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"sid_ixs",
"=",
"all_sids",
".",
"searchsorted",
"(",
"event_sids",
")",
"# side='right' here ensures that we include the event date itself",
"# if it's in all_dates.",
"dt_ixs",
"=",
"all_dates",
".",
"searchsorted",
"(",
"event_dates",
",",
"side",
"=",
"'right'",
")",
"ts_ixs",
"=",
"data_query_cutoff",
".",
"searchsorted",
"(",
"event_timestamps",
",",
"side",
"=",
"'right'",
")",
"# Walk backward through the events, writing the index of the event into",
"# slots ranging from the event's timestamp to its asof. This depends for",
"# correctness on the fact that event_dates is sorted in ascending order,",
"# because we need to overwrite later events with earlier ones if their",
"# eligible windows overlap.",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"event_sids",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"start_ix",
"=",
"ts_ixs",
"[",
"i",
"]",
"end_ix",
"=",
"dt_ixs",
"[",
"i",
"]",
"out",
"[",
"start_ix",
":",
"end_ix",
",",
"sid_ixs",
"[",
"i",
"]",
"]",
"=",
"i",
"return",
"out"
] | Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``. | [
"Construct",
"an",
"index",
"array",
"that",
"when",
"applied",
"to",
"an",
"array",
"of",
"values",
"produces",
"a",
"2D",
"array",
"containing",
"the",
"values",
"associated",
"with",
"the",
"next",
"event",
"for",
"each",
"sid",
"at",
"each",
"moment",
"in",
"time",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L25-L79 |
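Stripped of metadata validation, the construction is two `searchsorted` calls plus a backward fill. A toy sketch that reuses the session labels as the data-query cutoffs (so an event announced on a session becomes visible the following session):

```python
import numpy as np
import pandas as pd

sessions = pd.DatetimeIndex(
    ['2014-01-06', '2014-01-07', '2014-01-08', '2014-01-09'], tz='UTC')
all_sids = np.array([1])

# One event for sid 1: learned about on the 6th, occurring on the 8th.
event_dates = sessions.values[[2]]
event_timestamps = sessions.values[[0]]
event_sids = np.array([1])

out = np.full((len(sessions), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = sessions.values.searchsorted(event_dates, side='right')
ts_ixs = sessions.values.searchsorted(event_timestamps, side='right')
for i in range(len(event_sids) - 1, -1, -1):
    out[ts_ixs[i]:dt_ixs[i], sid_ixs[i]] = i  # window where event is "next"

print(out.ravel())  # [-1  0  0 -1]
```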
25,806 | quantopian/zipline | zipline/pipeline/loaders/utils.py | previous_event_indexer | def previous_event_indexer(data_query_cutoff_times,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
data_query_cutoff_times : pd.DatetimeIndex
The boundaries for the given trading sessions; these also serve as the
row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(data_query_cutoff_times), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full(
(len(data_query_cutoff_times), len(all_sids)),
-1,
dtype=np.int64,
)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right')
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out | python | def previous_event_indexer(data_query_cutoff_times,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
data_query_cutoff_times : pd.DatetimeIndex
The boundaries for the given trading sessions; these also serve as the
row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(data_query_cutoff_times), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full(
(len(data_query_cutoff_times), len(all_sids)),
-1,
dtype=np.int64,
)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right')
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out | [
"def",
"previous_event_indexer",
"(",
"data_query_cutoff_times",
",",
"all_sids",
",",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
":",
"validate_event_metadata",
"(",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
"out",
"=",
"np",
".",
"full",
"(",
"(",
"len",
"(",
"data_query_cutoff_times",
")",
",",
"len",
"(",
"all_sids",
")",
")",
",",
"-",
"1",
",",
"dtype",
"=",
"np",
".",
"int64",
",",
")",
"eff_dts",
"=",
"np",
".",
"maximum",
"(",
"event_dates",
",",
"event_timestamps",
")",
"sid_ixs",
"=",
"all_sids",
".",
"searchsorted",
"(",
"event_sids",
")",
"dt_ixs",
"=",
"data_query_cutoff_times",
".",
"searchsorted",
"(",
"eff_dts",
",",
"side",
"=",
"'right'",
")",
"# Walk backwards through the events, writing the index of the event into",
"# slots ranging from max(event_date, event_timestamp) to the start of the",
"# previously-written event. This depends for correctness on the fact that",
"# event_dates is sorted in ascending order, because we need to have written",
"# later events so we know where to stop forward-filling earlier events.",
"last_written",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"event_dates",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"sid_ix",
"=",
"sid_ixs",
"[",
"i",
"]",
"dt_ix",
"=",
"dt_ixs",
"[",
"i",
"]",
"out",
"[",
"dt_ix",
":",
"last_written",
".",
"get",
"(",
"sid_ix",
",",
"None",
")",
",",
"sid_ix",
"]",
"=",
"i",
"last_written",
"[",
"sid_ix",
"]",
"=",
"dt_ix",
"return",
"out"
] | Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
data_query_cutoff_times : pd.DatetimeIndex
The boundaries for the given trading sessions; these also serve as the
row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(data_query_cutoff_times), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``. | [
"Construct",
"an",
"index",
"array",
"that",
"when",
"applied",
"to",
"an",
"array",
"of",
"values",
"produces",
"a",
"2D",
"array",
"containing",
"the",
"values",
"associated",
"with",
"the",
"previous",
"event",
"for",
"each",
"sid",
"at",
"each",
"moment",
"in",
"time",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L82-L138 |
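A returned indexer is consumed with fancy indexing, after which the -1 sentinel slots must be masked, since -1 would otherwise wrap around to the last event. A small illustration with made-up values:

```python
import numpy as np

# An indexer of shape (3 dates, 2 sids); -1 means "no event known".
indexer = np.array([[-1, -1],
                    [ 0, -1],
                    [ 0,  1]])
event_values = np.array([10.0, 20.0])  # one value per event row

out = event_values[indexer]   # -1 silently selects the last event...
out[indexer == -1] = np.nan   # ...so the sentinel slots are masked here
print(out)
# [[nan nan]
#  [10. nan]
#  [10. 20.]]
```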
25,807 | quantopian/zipline | zipline/pipeline/loaders/utils.py | last_in_date_group | def last_in_date_group(df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted(
df[TS_FIELD_NAME].values,
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group | python | def last_in_date_group(df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted(
df[TS_FIELD_NAME].values,
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group | [
"def",
"last_in_date_group",
"(",
"df",
",",
"data_query_cutoff_times",
",",
"assets",
",",
"reindex",
"=",
"True",
",",
"have_sids",
"=",
"True",
",",
"extra_groupers",
"=",
"None",
")",
":",
"idx",
"=",
"[",
"data_query_cutoff_times",
"[",
"data_query_cutoff_times",
".",
"searchsorted",
"(",
"df",
"[",
"TS_FIELD_NAME",
"]",
".",
"values",
",",
")",
"]",
"]",
"if",
"have_sids",
":",
"idx",
"+=",
"[",
"SID_FIELD_NAME",
"]",
"if",
"extra_groupers",
"is",
"None",
":",
"extra_groupers",
"=",
"[",
"]",
"idx",
"+=",
"extra_groupers",
"last_in_group",
"=",
"df",
".",
"drop",
"(",
"TS_FIELD_NAME",
",",
"axis",
"=",
"1",
")",
".",
"groupby",
"(",
"idx",
",",
"sort",
"=",
"False",
",",
")",
".",
"last",
"(",
")",
"# For the number of things that we're grouping by (except TS), unstack",
"# the df. Done this way because of an unresolved pandas bug whereby",
"# passing a list of levels with mixed dtypes to unstack causes the",
"# resulting DataFrame to have all object-type columns.",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"idx",
")",
"-",
"1",
")",
":",
"last_in_group",
"=",
"last_in_group",
".",
"unstack",
"(",
"-",
"1",
")",
"if",
"reindex",
":",
"if",
"have_sids",
":",
"cols",
"=",
"last_in_group",
".",
"columns",
"last_in_group",
"=",
"last_in_group",
".",
"reindex",
"(",
"index",
"=",
"data_query_cutoff_times",
",",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"tuple",
"(",
"cols",
".",
"levels",
"[",
"0",
":",
"len",
"(",
"extra_groupers",
")",
"+",
"1",
"]",
")",
"+",
"(",
"assets",
",",
")",
",",
"names",
"=",
"cols",
".",
"names",
",",
")",
",",
")",
"else",
":",
"last_in_group",
"=",
"last_in_group",
".",
"reindex",
"(",
"data_query_cutoff_times",
")",
"return",
"last_in_group"
] | Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns. | [
"Determine",
"the",
"last",
"piece",
"of",
"information",
"known",
"on",
"each",
"date",
"in",
"the",
"date"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L141-L213 |
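The heart of the function — bucket each record into the first cutoff at or after its timestamp, keep the last record per (cutoff, sid), pivot sids into columns, and reindex — fits in a short standalone sketch with made-up records:

```python
import pandas as pd

cutoffs = pd.DatetimeIndex(['2014-01-06 13:45',
                            '2014-01-07 13:45',
                            '2014-01-08 13:45'])

raw = pd.DataFrame({
    'timestamp': pd.to_datetime(['2014-01-06 10:00',
                                 '2014-01-06 12:00',
                                 '2014-01-07 09:00']),
    'sid': [1, 1, 2],
    'value': [1.0, 2.0, 3.0],
})

# Bucket records into the first cutoff at/after their timestamps, then
# keep the last record per (cutoff, sid) and pivot sids into columns.
bucket = cutoffs[cutoffs.searchsorted(raw['timestamp'].values)]
last = (raw.drop('timestamp', axis=1)
           .groupby([bucket, 'sid'], sort=False)
           .last()
           .unstack(-1)
           .reindex(cutoffs))
print(last)  # sid 1: 2.0 known by the 6th; sid 2: 3.0 known by the 7th
```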
25,808 | quantopian/zipline | zipline/pipeline/loaders/utils.py | ffill_across_cols | def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
column.name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype) | python | def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
column.name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype) | [
"def",
"ffill_across_cols",
"(",
"df",
",",
"columns",
",",
"name_map",
")",
":",
"df",
".",
"ffill",
"(",
"inplace",
"=",
"True",
")",
"# Fill in missing values specified by each column. This is made",
"# significantly more complex by the fact that we need to work around",
"# two pandas issues:",
"# 1) When we have sids, if there are no records for a given sid for any",
"# dates, pandas will generate a column full of NaNs for that sid.",
"# This means that some of the columns in `dense_output` are now",
"# float instead of the intended dtype, so we have to coerce back to",
"# our expected type and convert NaNs into the desired missing value.",
"# 2) DataFrame.ffill assumes that receiving None as a fill-value means",
"# that no value was passed. Consequently, there's no way to tell",
"# pandas to replace NaNs in an object column with None using fillna,",
"# so we have to roll our own instead using df.where.",
"for",
"column",
"in",
"columns",
":",
"column_name",
"=",
"name_map",
"[",
"column",
".",
"name",
"]",
"# Special logic for strings since `fillna` doesn't work if the",
"# missing value is `None`.",
"if",
"column",
".",
"dtype",
"==",
"categorical_dtype",
":",
"df",
"[",
"column_name",
"]",
"=",
"df",
"[",
"column",
".",
"name",
"]",
".",
"where",
"(",
"pd",
".",
"notnull",
"(",
"df",
"[",
"column_name",
"]",
")",
",",
"column",
".",
"missing_value",
")",
"else",
":",
"# We need to execute `fillna` before `astype` in case the",
"# column contains NaNs and needs to be cast to bool or int.",
"# This is so that the NaNs are replaced first, since pandas",
"# can't convert NaNs for those types.",
"df",
"[",
"column_name",
"]",
"=",
"df",
"[",
"column_name",
"]",
".",
"fillna",
"(",
"column",
".",
"missing_value",
")",
".",
"astype",
"(",
"column",
".",
"dtype",
")"
] | Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`. | [
"Forward",
"fill",
"values",
"in",
"a",
"DataFrame",
"with",
"special",
"logic",
"to",
"handle",
"cases",
"that",
"pd",
".",
"DataFrame",
".",
"ffill",
"cannot",
"and",
"cast",
"columns",
"to",
"appropriate",
"types",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L216-L264 |
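The two pandas workarounds the comments describe are easy to see in isolation. A sketch with a NaN-polluted integer column and an object column whose missing value is `None`:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    'shares': [np.nan, 100.0, np.nan],  # meant to be int64, NaN-polluted
    'rating': [None, 'buy', None],      # object column, missing value None
})
df.ffill(inplace=True)

# Issue 1: fillna *before* astype, since NaN cannot be cast to int/bool.
df['shares'] = df['shares'].fillna(0).astype(np.int64)

# Issue 2: fillna(None) means "no value passed", so use .where instead.
df['rating'] = df['rating'].where(pd.notnull(df['rating']), None)

print(df)
#    shares rating
# 0       0   None
# 1     100    buy
# 2     100    buy
```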
25,809 | quantopian/zipline | zipline/pipeline/loaders/utils.py | shift_dates | def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift] | python | def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift] | [
"def",
"shift_dates",
"(",
"dates",
",",
"start_date",
",",
"end_date",
",",
"shift",
")",
":",
"try",
":",
"start",
"=",
"dates",
".",
"get_loc",
"(",
"start_date",
")",
"except",
"KeyError",
":",
"if",
"start_date",
"<",
"dates",
"[",
"0",
"]",
":",
"raise",
"NoFurtherDataError",
"(",
"msg",
"=",
"(",
"\"Pipeline Query requested data starting on {query_start}, \"",
"\"but first known date is {calendar_start}\"",
")",
".",
"format",
"(",
"query_start",
"=",
"str",
"(",
"start_date",
")",
",",
"calendar_start",
"=",
"str",
"(",
"dates",
"[",
"0",
"]",
")",
",",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Query start %s not in calendar\"",
"%",
"start_date",
")",
"# Make sure that shifting doesn't push us out of the calendar.",
"if",
"start",
"<",
"shift",
":",
"raise",
"NoFurtherDataError",
"(",
"msg",
"=",
"(",
"\"Pipeline Query requested data from {shift}\"",
"\" days before {query_start}, but first known date is only \"",
"\"{start} days earlier.\"",
")",
".",
"format",
"(",
"shift",
"=",
"shift",
",",
"query_start",
"=",
"start_date",
",",
"start",
"=",
"start",
")",
",",
")",
"try",
":",
"end",
"=",
"dates",
".",
"get_loc",
"(",
"end_date",
")",
"except",
"KeyError",
":",
"if",
"end_date",
">",
"dates",
"[",
"-",
"1",
"]",
":",
"raise",
"NoFurtherDataError",
"(",
"msg",
"=",
"(",
"\"Pipeline Query requesting data up to {query_end}, \"",
"\"but last known date is {calendar_end}\"",
")",
".",
"format",
"(",
"query_end",
"=",
"end_date",
",",
"calendar_end",
"=",
"dates",
"[",
"-",
"1",
"]",
",",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Query end %s not in calendar\"",
"%",
"end_date",
")",
"return",
"dates",
"[",
"start",
"-",
"shift",
"]",
",",
"dates",
"[",
"end",
"-",
"shift",
"]"
] | Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates. | [
"Shift",
"dates",
"of",
"a",
"pipeline",
"query",
"back",
"by",
"shift",
"days",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L267-L330 |
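Aside from its error handling, the shift is positional arithmetic on the session index; a sketch on a made-up five-session business-day calendar:

```python
import pandas as pd

sessions = pd.date_range('2014-01-06', periods=5, freq='B')  # Mon..Fri

def shifted(start, end, shift):
    start_ix = sessions.get_loc(start)
    end_ix = sessions.get_loc(end)
    if start_ix < shift:
        raise ValueError('not enough history before the query start')
    return sessions[start_ix - shift], sessions[end_ix - shift]

# A query over Wed..Fri shifted back one session reads Tue..Thu labels.
print(shifted('2014-01-08', '2014-01-10', shift=1))
```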
25,810 | quantopian/zipline | zipline/utils/sharedoc.py | format_docstring | def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
# Search for '{name}', with optional leading whitespace.
regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(
target, owner_name
)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params) | python | def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
# Search for '{name}', with optional leading whitespace.
regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(
target, owner_name
)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params) | [
"def",
"format_docstring",
"(",
"owner_name",
",",
"docstring",
",",
"formatters",
")",
":",
"# Build a dict of parameters to a vanilla format() call by searching for",
"# each entry in **formatters and applying any leading whitespace to each",
"# line in the desired substitution.",
"format_params",
"=",
"{",
"}",
"for",
"target",
",",
"doc_for_target",
"in",
"iteritems",
"(",
"formatters",
")",
":",
"# Search for '{name}', with optional leading whitespace.",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'^(\\s*)'",
"+",
"'({'",
"+",
"target",
"+",
"'})$'",
",",
"re",
".",
"MULTILINE",
")",
"matches",
"=",
"regex",
".",
"findall",
"(",
"docstring",
")",
"if",
"not",
"matches",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find template for parameter {!r} in docstring \"",
"\"for {}.\"",
"\"\\nParameter name must be alone on a line surrounded by \"",
"\"braces.\"",
".",
"format",
"(",
"target",
",",
"owner_name",
")",
",",
")",
"elif",
"len",
"(",
"matches",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Couldn't found multiple templates for parameter {!r}\"",
"\"in docstring for {}.\"",
"\"\\nParameter should only appear once.\"",
".",
"format",
"(",
"target",
",",
"owner_name",
")",
")",
"(",
"leading_whitespace",
",",
"_",
")",
"=",
"matches",
"[",
"0",
"]",
"format_params",
"[",
"target",
"]",
"=",
"pad_lines_after_first",
"(",
"leading_whitespace",
",",
"doc_for_target",
",",
")",
"return",
"docstring",
".",
"format",
"(",
"*",
"*",
"format_params",
")"
] | Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string. | [
"Template",
"formatters",
"into",
"docstring",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/sharedoc.py#L35-L82 |
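A condensed sketch of the same mechanism — locate `{params}` alone on its line, capture its indentation, and re-indent the substitution to match — using a stand-in for ``pad_lines_after_first`` (the template name `params` and the docstring are made up):

```python
import re

def pad_lines_after_first(prefix, s):
    # Prefix every line after the first, preserving relative indentation.
    return ('\n' + prefix).join(s.splitlines())

docstring = """My function.

    Parameters
    ----------
    {params}
    """

params = "x : int\n    The input value."

# Capture the whitespace preceding '{params}' on its own line.
leading = re.search(r'^(\s*)(\{params\})$', docstring, re.MULTILINE).group(1)
print(docstring.format(params=pad_lines_after_first(leading, params)))
```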
25,811 | quantopian/zipline | zipline/utils/sharedoc.py | templated_docstring | def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator | python | def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator | [
"def",
"templated_docstring",
"(",
"*",
"*",
"docs",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"f",
".",
"__doc__",
"=",
"format_docstring",
"(",
"f",
".",
"__name__",
",",
"f",
".",
"__doc__",
",",
"docs",
")",
"return",
"f",
"return",
"decorator"
] | Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar' | [
"Decorator",
"allowing",
"the",
"use",
"of",
"templated",
"docstrings",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/sharedoc.py#L85-L101 |
25,812 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline.add | def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term | python | def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term | [
"def",
"add",
"(",
"self",
",",
"term",
",",
"name",
",",
"overwrite",
"=",
"False",
")",
":",
"self",
".",
"validate_column",
"(",
"name",
",",
"term",
")",
"columns",
"=",
"self",
".",
"columns",
"if",
"name",
"in",
"columns",
":",
"if",
"overwrite",
":",
"self",
".",
"remove",
"(",
"name",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"Column '{}' already exists.\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"not",
"isinstance",
"(",
"term",
",",
"ComputableTerm",
")",
":",
"raise",
"TypeError",
"(",
"\"{term} is not a valid pipeline column. Did you mean to \"",
"\"append '.latest'?\"",
".",
"format",
"(",
"term",
"=",
"term",
")",
")",
"self",
".",
"_columns",
"[",
"name",
"]",
"=",
"term"
] | Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`. | [
"Add",
"a",
"column",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L80-L112 |
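A short usage sketch for add(), assuming zipline is installed; the factor, window length, and column name are illustrative.

from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage

pipe = Pipeline()
sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
pipe.add(sma, 'sma_10')
# Re-adding under the same name raises KeyError unless overwrite=True.
pipe.add(sma, 'sma_10', overwrite=True)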
25,813 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline.set_screen | def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameters
----------
screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen | python | def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameters
----------
screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen | [
"def",
"set_screen",
"(",
"self",
",",
"screen",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"self",
".",
"_screen",
"is",
"not",
"None",
"and",
"not",
"overwrite",
":",
"raise",
"ValueError",
"(",
"\"set_screen() called with overwrite=False and screen already \"",
"\"set.\\n\"",
"\"If you want to apply multiple filters as a screen use \"",
"\"set_screen(filter1 & filter2 & ...).\\n\"",
"\"If you want to replace the previous screen with a new one, \"",
"\"use set_screen(new_filter, overwrite=True).\"",
")",
"self",
".",
"_screen",
"=",
"screen"
] | Set a screen on this Pipeline.
Parameters
----------
screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error. | [
"Set",
"a",
"screen",
"on",
"this",
"Pipeline",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L137-L158 |
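A sketch of the combined-filter pattern the error message recommends, assuming zipline is installed; the thresholds are arbitrary.

from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing

pipe = Pipeline()
liquid = USEquityPricing.volume.latest > 1e5
cheap = USEquityPricing.close.latest < 50
pipe.set_screen(liquid & cheap)           # one combined screen
pipe.set_screen(liquid, overwrite=True)   # replacing requires overwrite=True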
25,814 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline.to_execution_plan | def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
) | python | def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
) | [
"def",
"to_execution_plan",
"(",
"self",
",",
"domain",
",",
"default_screen",
",",
"start_date",
",",
"end_date",
")",
":",
"if",
"self",
".",
"_domain",
"is",
"not",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"not",
"domain",
":",
"raise",
"AssertionError",
"(",
"\"Attempted to compile Pipeline with domain {} to execution \"",
"\"plan with different domain {}.\"",
".",
"format",
"(",
"self",
".",
"_domain",
",",
"domain",
")",
")",
"return",
"ExecutionPlan",
"(",
"domain",
"=",
"domain",
",",
"terms",
"=",
"self",
".",
"_prepare_graph_terms",
"(",
"default_screen",
")",
",",
"start_date",
"=",
"start_date",
",",
"end_date",
"=",
"end_date",
",",
")"
] | Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements. | [
"Compile",
"into",
"an",
"ExecutionPlan",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L160-L199 |
25,815 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline._prepare_graph_terms | def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns | python | def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns | [
"def",
"_prepare_graph_terms",
"(",
"self",
",",
"default_screen",
")",
":",
"columns",
"=",
"self",
".",
"columns",
".",
"copy",
"(",
")",
"screen",
"=",
"self",
".",
"screen",
"if",
"screen",
"is",
"None",
":",
"screen",
"=",
"default_screen",
"columns",
"[",
"SCREEN_NAME",
"]",
"=",
"screen",
"return",
"columns"
] | Helper for to_graph and to_execution_plan. | [
"Helper",
"for",
"to_graph",
"and",
"to_execution_plan",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L217-L224 |
25,816 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline.show_graph | def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format) | python | def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format) | [
"def",
"show_graph",
"(",
"self",
",",
"format",
"=",
"'svg'",
")",
":",
"g",
"=",
"self",
".",
"to_simple_graph",
"(",
"AssetExists",
"(",
")",
")",
"if",
"format",
"==",
"'svg'",
":",
"return",
"g",
".",
"svg",
"elif",
"format",
"==",
"'png'",
":",
"return",
"g",
".",
"png",
"elif",
"format",
"==",
"'jpeg'",
":",
"return",
"g",
".",
"jpeg",
"else",
":",
"# We should never get here because of the expect_element decorator",
"# above.",
"raise",
"AssertionError",
"(",
"\"Unknown graph format %r.\"",
"%",
"format",
")"
] | Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'. | [
"Render",
"this",
"Pipeline",
"as",
"a",
"DAG",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L227-L246 |
25,817 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline._output_terms | def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(six.itervalues(self._columns))
screen = self.screen
if screen is not None:
terms.append(screen)
return terms | python | def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(six.itervalues(self._columns))
screen = self.screen
if screen is not None:
terms.append(screen)
return terms | [
"def",
"_output_terms",
"(",
"self",
")",
":",
"terms",
"=",
"list",
"(",
"six",
".",
"itervalues",
"(",
"self",
".",
"_columns",
")",
")",
"screen",
"=",
"self",
".",
"screen",
"if",
"screen",
"is",
"not",
"None",
":",
"terms",
".",
"append",
"(",
"screen",
")",
"return",
"terms"
] | A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present. | [
"A",
"list",
"of",
"terms",
"that",
"are",
"outputs",
"of",
"this",
"pipeline",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L255-L266 |
25,818 | quantopian/zipline | zipline/pipeline/pipeline.py | Pipeline.domain | def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred | python | def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred | [
"def",
"domain",
"(",
"self",
",",
"default",
")",
":",
"# Always compute our inferred domain to ensure that it's compatible",
"# with our explicit domain.",
"inferred",
"=",
"infer_domain",
"(",
"self",
".",
"_output_terms",
")",
"if",
"inferred",
"is",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"GENERIC",
":",
"# Both generic. Fall back to default.",
"return",
"default",
"elif",
"inferred",
"is",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"not",
"GENERIC",
":",
"# Use the non-generic domain.",
"return",
"self",
".",
"_domain",
"elif",
"inferred",
"is",
"not",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"GENERIC",
":",
"# Use the non-generic domain.",
"return",
"inferred",
"else",
":",
"# Both non-generic. They have to match.",
"if",
"inferred",
"is",
"not",
"self",
".",
"_domain",
":",
"raise",
"ValueError",
"(",
"\"Conflicting domains in Pipeline. Inferred {}, but {} was \"",
"\"passed at construction.\"",
".",
"format",
"(",
"inferred",
",",
"self",
".",
"_domain",
")",
")",
"return",
"inferred"
] | Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain. | [
"Get",
"the",
"domain",
"for",
"this",
"pipeline",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L269-L314 |
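A sketch of the four-way resolution above, assuming a recent zipline with pipeline domains; US_EQUITIES stands in for any concrete domain.

from zipline.pipeline import Pipeline
from zipline.pipeline.domain import GENERIC, US_EQUITIES

# Both generic: fall back to the default.
assert Pipeline().domain(default=GENERIC) is GENERIC
# Explicit construction-time domain wins over a generic inference.
assert Pipeline(domain=US_EQUITIES).domain(default=GENERIC) is US_EQUITIES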
25,819 | quantopian/zipline | zipline/pipeline/expression.py | _ensure_element | def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup) | python | def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup) | [
"def",
"_ensure_element",
"(",
"tup",
",",
"elem",
")",
":",
"try",
":",
"return",
"tup",
",",
"tup",
".",
"index",
"(",
"elem",
")",
"except",
"ValueError",
":",
"return",
"tuple",
"(",
"chain",
"(",
"tup",
",",
"(",
"elem",
",",
")",
")",
")",
",",
"len",
"(",
"tup",
")"
] | Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple. | [
"Create",
"a",
"tuple",
"containing",
"all",
"elements",
"of",
"tup",
"plus",
"elem",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L92-L101 |
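The helper is small enough to rerun standalone; both branches are shown.

from itertools import chain

def _ensure_element(tup, elem):
    try:
        return tup, tup.index(elem)
    except ValueError:
        return tuple(chain(tup, (elem,))), len(tup)

print(_ensure_element(('a', 'b'), 'b'))  # (('a', 'b'), 1): already present
print(_ensure_element(('a', 'b'), 'c'))  # (('a', 'b', 'c'), 2): appended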
25,820 | quantopian/zipline | zipline/pipeline/expression.py | NumericalExpression._compute | def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out | python | def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out | [
"def",
"_compute",
"(",
"self",
",",
"arrays",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"out",
"=",
"full",
"(",
"mask",
".",
"shape",
",",
"self",
".",
"missing_value",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"# This writes directly into our output buffer.",
"numexpr",
".",
"evaluate",
"(",
"self",
".",
"_expr",
",",
"local_dict",
"=",
"{",
"\"x_%d\"",
"%",
"idx",
":",
"array",
"for",
"idx",
",",
"array",
"in",
"enumerate",
"(",
"arrays",
")",
"}",
",",
"global_dict",
"=",
"{",
"'inf'",
":",
"inf",
"}",
",",
"out",
"=",
"out",
",",
")",
"return",
"out"
] | Compute our stored expression string with numexpr. | [
"Compute",
"our",
"stored",
"expression",
"string",
"with",
"numexpr",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L238-L253 |
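The numexpr call pattern in isolation: named arrays passed via local_dict, with results written into a preallocated buffer. The expression and arrays here are made up.

import numexpr
import numpy as np

arrays = [np.array([1.0, 2.0]), np.array([10.0, 20.0])]
out = np.full(2, np.nan)
numexpr.evaluate(
    "x_0 + x_1",
    local_dict={"x_%d" % i: a for i, a in enumerate(arrays)},
    global_dict={"inf": np.inf},
    out=out,
)
print(out)  # [11. 22.]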
25,821 | quantopian/zipline | zipline/pipeline/expression.py | NumericalExpression._rebind_variables | def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_") | python | def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_") | [
"def",
"_rebind_variables",
"(",
"self",
",",
"new_inputs",
")",
":",
"expr",
"=",
"self",
".",
"_expr",
"# If we have 11+ variables, some of our variable names may be",
"# substrings of other variable names. For example, we might have x_1,",
"# x_10, and x_100. By enumerating in reverse order, we ensure that",
"# every variable name which is a substring of another variable name is",
"# processed after the variable of which it is a substring. This",
"# guarantees that the substitution of any given variable index only",
"# ever affects exactly its own index. For example, if we have variables",
"# with indices going up to 100, we will process all of the x_1xx names",
"# before x_1x, which will be before x_1, so the substitution of x_1",
"# will not affect x_1x, which will not affect x_1xx.",
"for",
"idx",
",",
"input_",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"self",
".",
"inputs",
")",
")",
")",
":",
"old_varname",
"=",
"\"x_%d\"",
"%",
"idx",
"# Temporarily rebind to x_temp_N so that we don't overwrite the",
"# same value multiple times.",
"temp_new_varname",
"=",
"\"x_temp_%d\"",
"%",
"new_inputs",
".",
"index",
"(",
"input_",
")",
"expr",
"=",
"expr",
".",
"replace",
"(",
"old_varname",
",",
"temp_new_varname",
")",
"# Clear out the temp variables now that we've finished iteration.",
"return",
"expr",
".",
"replace",
"(",
"\"_temp_\"",
",",
"\"_\"",
")"
] | Return self._expr with all variables rebound to the indices implied by
new_inputs. | [
"Return",
"self",
".",
"_expr",
"with",
"all",
"variables",
"rebound",
"to",
"the",
"indices",
"implied",
"by",
"new_inputs",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L255-L279 |
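A standalone demonstration of the reverse-order, temp-name rename described in the comments above; a naive forward pass over the same expression would rewrite the "x_1" prefix inside "x_10".

expr = "x_1 + x_10"
inputs = ['i%d' % n for n in range(11)]   # hypothetical old input order
new_inputs = list(reversed(inputs))       # a permuted new input order

for idx, input_ in reversed(list(enumerate(inputs))):
    expr = expr.replace("x_%d" % idx, "x_temp_%d" % new_inputs.index(input_))
print(expr.replace("_temp_", "_"))        # x_9 + x_0

# Naive version for contrast: "x_1 + x_10".replace("x_1", "x_9") yields
# "x_9 + x_90", corrupting the second variable.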
25,822 | quantopian/zipline | zipline/pipeline/expression.py | NumericalExpression._merge_expressions | def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs | python | def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs | [
"def",
"_merge_expressions",
"(",
"self",
",",
"other",
")",
":",
"new_inputs",
"=",
"tuple",
"(",
"set",
"(",
"self",
".",
"inputs",
")",
".",
"union",
"(",
"other",
".",
"inputs",
")",
")",
"new_self_expr",
"=",
"self",
".",
"_rebind_variables",
"(",
"new_inputs",
")",
"new_other_expr",
"=",
"other",
".",
"_rebind_variables",
"(",
"new_inputs",
")",
"return",
"new_self_expr",
",",
"new_other_expr",
",",
"new_inputs"
] | Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs) | [
"Merge",
"the",
"inputs",
"of",
"two",
"NumericalExpressions",
"into",
"a",
"single",
"input",
"tuple",
"rewriting",
"their",
"respective",
"string",
"expressions",
"to",
"make",
"input",
"names",
"resolve",
"correctly",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L281-L292 |
25,823 | quantopian/zipline | zipline/pipeline/expression.py | NumericalExpression.build_binary_op | def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, other)
return self_expr, other_expr, new_inputs | python | def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, other)
return self_expr, other_expr, new_inputs | [
"def",
"build_binary_op",
"(",
"self",
",",
"op",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"NumericalExpression",
")",
":",
"self_expr",
",",
"other_expr",
",",
"new_inputs",
"=",
"self",
".",
"_merge_expressions",
"(",
"other",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Term",
")",
":",
"self_expr",
"=",
"self",
".",
"_expr",
"new_inputs",
",",
"other_idx",
"=",
"_ensure_element",
"(",
"self",
".",
"inputs",
",",
"other",
")",
"other_expr",
"=",
"\"x_%d\"",
"%",
"other_idx",
"elif",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"self_expr",
"=",
"self",
".",
"_expr",
"other_expr",
"=",
"str",
"(",
"other",
")",
"new_inputs",
"=",
"self",
".",
"inputs",
"else",
":",
"raise",
"BadBinaryOperator",
"(",
"op",
",",
"other",
")",
"return",
"self_expr",
",",
"other_expr",
",",
"new_inputs"
] | Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator. | [
"Compute",
"new",
"expression",
"strings",
"and",
"a",
"new",
"inputs",
"tuple",
"for",
"combining",
"self",
"and",
"other",
"with",
"a",
"binary",
"operator",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L294-L311 |
25,824 | quantopian/zipline | zipline/pipeline/expression.py | NumericalExpression.graph_repr | def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(r"[-+]?\d*\.\d+",
lambda x: format(float(x.group(0)), '.2E'),
self._expr)
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Expression:\\l {}\\l".format(
final,
) | python | def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(r"[-+]?\d*\.\d+",
lambda x: format(float(x.group(0)), '.2E'),
self._expr)
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Expression:\\l {}\\l".format(
final,
) | [
"def",
"graph_repr",
"(",
"self",
")",
":",
"# Replace any floating point numbers in the expression",
"# with their scientific notation",
"final",
"=",
"re",
".",
"sub",
"(",
"r\"[-+]?\\d*\\.\\d+\"",
",",
"lambda",
"x",
":",
"format",
"(",
"float",
"(",
"x",
".",
"group",
"(",
"0",
")",
")",
",",
"'.2E'",
")",
",",
"self",
".",
"_expr",
")",
"# Graphviz interprets `\\l` as \"divide label into lines, left-justified\"",
"return",
"\"Expression:\\\\l {}\\\\l\"",
".",
"format",
"(",
"final",
",",
")"
] | Short repr to use when rendering Pipeline graphs. | [
"Short",
"repr",
"to",
"use",
"when",
"rendering",
"Pipeline",
"graphs",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L327-L338 |
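The float-to-scientific-notation rewrite in isolation; the expression is illustrative.

import re

expr = "x_0 * 0.0001 + x_1 * 12.5"
print(re.sub(r"[-+]?\d*\.\d+",
             lambda m: format(float(m.group(0)), '.2E'),
             expr))
# x_0 * 1.00E-04 + x_1 * 1.25E+01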
25,825 | quantopian/zipline | zipline/utils/paths.py | last_modified_time | def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC') | python | def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC') | [
"def",
"last_modified_time",
"(",
"path",
")",
":",
"return",
"pd",
".",
"Timestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"path",
")",
",",
"unit",
"=",
"'s'",
",",
"tz",
"=",
"'UTC'",
")"
] | Get the last modified time of path as a Timestamp. | [
"Get",
"the",
"last",
"modified",
"time",
"of",
"path",
"as",
"a",
"Timestamp",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/paths.py#L78-L82 |
25,826 | quantopian/zipline | zipline/utils/paths.py | zipline_root | def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root | python | def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root | [
"def",
"zipline_root",
"(",
"environ",
"=",
"None",
")",
":",
"if",
"environ",
"is",
"None",
":",
"environ",
"=",
"os",
".",
"environ",
"root",
"=",
"environ",
".",
"get",
"(",
"'ZIPLINE_ROOT'",
",",
"None",
")",
"if",
"root",
"is",
"None",
":",
"root",
"=",
"expanduser",
"(",
"'~/.zipline'",
")",
"return",
"root"
] | Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir. | [
"Get",
"the",
"root",
"directory",
"for",
"all",
"zipline",
"-",
"managed",
"files",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/paths.py#L107-L131 |
25,827 | quantopian/zipline | zipline/pipeline/loaders/frame.py | DataFrameLoader.format_adjustments | def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out | python | def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out | [
"def",
"format_adjustments",
"(",
"self",
",",
"dates",
",",
"assets",
")",
":",
"make_adjustment",
"=",
"partial",
"(",
"make_adjustment_from_labels",
",",
"dates",
",",
"assets",
")",
"min_date",
",",
"max_date",
"=",
"dates",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"# TODO: Consider porting this to Cython.",
"if",
"len",
"(",
"self",
".",
"adjustments",
")",
"==",
"0",
":",
"return",
"{",
"}",
"# Mask for adjustments whose apply_dates are in the requested window of",
"# dates.",
"date_bounds",
"=",
"self",
".",
"adjustment_apply_dates",
".",
"slice_indexer",
"(",
"min_date",
",",
"max_date",
",",
")",
"dates_filter",
"=",
"zeros",
"(",
"len",
"(",
"self",
".",
"adjustments",
")",
",",
"dtype",
"=",
"'bool'",
")",
"dates_filter",
"[",
"date_bounds",
"]",
"=",
"True",
"# Ignore adjustments whose apply_date is in range, but whose end_date",
"# is out of range.",
"dates_filter",
"&=",
"(",
"self",
".",
"adjustment_end_dates",
">=",
"min_date",
")",
"# Mask for adjustments whose sids are in the requested assets.",
"sids_filter",
"=",
"self",
".",
"adjustment_sids",
".",
"isin",
"(",
"assets",
".",
"values",
")",
"adjustments_to_use",
"=",
"self",
".",
"adjustments",
".",
"loc",
"[",
"dates_filter",
"&",
"sids_filter",
"]",
".",
"set_index",
"(",
"'apply_date'",
")",
"# For each apply_date on which we have an adjustment, compute",
"# the integer index of that adjustment's apply_date in `dates`.",
"# Then build a list of Adjustment objects for that apply_date.",
"# This logic relies on the sorting applied on the previous line.",
"out",
"=",
"{",
"}",
"previous_apply_date",
"=",
"object",
"(",
")",
"for",
"row",
"in",
"adjustments_to_use",
".",
"itertuples",
"(",
")",
":",
"# This expansion depends on the ordering of the DataFrame columns,",
"# defined above.",
"apply_date",
",",
"sid",
",",
"value",
",",
"kind",
",",
"start_date",
",",
"end_date",
"=",
"row",
"if",
"apply_date",
"!=",
"previous_apply_date",
":",
"# Get the next apply date if no exact match.",
"row_loc",
"=",
"dates",
".",
"get_loc",
"(",
"apply_date",
",",
"method",
"=",
"'bfill'",
")",
"current_date_adjustments",
"=",
"out",
"[",
"row_loc",
"]",
"=",
"[",
"]",
"previous_apply_date",
"=",
"apply_date",
"# Look up the approprate Adjustment constructor based on the value",
"# of `kind`.",
"current_date_adjustments",
".",
"append",
"(",
"make_adjustment",
"(",
"start_date",
",",
"end_date",
",",
"sid",
",",
"kind",
",",
"value",
")",
")",
"return",
"out"
] | Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
} | [
"Build",
"a",
"dict",
"of",
"Adjustment",
"objects",
"in",
"the",
"format",
"expected",
"by",
"AdjustedArray",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/frame.py#L83-L147 |
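A pandas sketch of just the date-window filtering above, not the Adjustment construction; the column names mirror the loader's, and the data is made up.

import pandas as pd

adjustments = pd.DataFrame({
    'apply_date': pd.to_datetime(['2014-01-02', '2014-01-06']),
    'end_date': pd.to_datetime(['2014-01-03', '2014-01-07']),
    'sid': [1, 2],
})
dates = pd.date_range('2014-01-01', '2014-01-03')
min_date, max_date = dates[[0, -1]]

in_window = adjustments.apply_date.between(min_date, max_date)
still_live = adjustments.end_date >= min_date
print(adjustments[in_window & still_live])  # only sid 1 survives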
25,828 | quantopian/zipline | zipline/pipeline/loaders/frame.py | DataFrameLoader.load_adjusted_array | def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
} | python | def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
} | [
"def",
"load_adjusted_array",
"(",
"self",
",",
"domain",
",",
"columns",
",",
"dates",
",",
"sids",
",",
"mask",
")",
":",
"if",
"len",
"(",
"columns",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Can't load multiple columns with DataFrameLoader\"",
")",
"column",
"=",
"columns",
"[",
"0",
"]",
"self",
".",
"_validate_input_column",
"(",
"column",
")",
"date_indexer",
"=",
"self",
".",
"dates",
".",
"get_indexer",
"(",
"dates",
")",
"assets_indexer",
"=",
"self",
".",
"assets",
".",
"get_indexer",
"(",
"sids",
")",
"# Boolean arrays with True on matched entries",
"good_dates",
"=",
"(",
"date_indexer",
"!=",
"-",
"1",
")",
"good_assets",
"=",
"(",
"assets_indexer",
"!=",
"-",
"1",
")",
"data",
"=",
"self",
".",
"baseline",
"[",
"ix_",
"(",
"date_indexer",
",",
"assets_indexer",
")",
"]",
"mask",
"=",
"(",
"good_assets",
"&",
"as_column",
"(",
"good_dates",
")",
")",
"&",
"mask",
"# Mask out requested columns/rows that didn't match.",
"data",
"[",
"~",
"mask",
"]",
"=",
"column",
".",
"missing_value",
"return",
"{",
"column",
":",
"AdjustedArray",
"(",
"# Pull out requested columns/rows from our baseline data.",
"data",
"=",
"data",
",",
"adjustments",
"=",
"self",
".",
"format_adjustments",
"(",
"dates",
",",
"sids",
")",
",",
"missing_value",
"=",
"column",
".",
"missing_value",
",",
")",
",",
"}"
] | Load data from our stored baseline. | [
"Load",
"data",
"from",
"our",
"stored",
"baseline",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/frame.py#L149-L181 |
25,829 | quantopian/zipline | zipline/pipeline/loaders/frame.py | DataFrameLoader._validate_input_column | def _validate_input_column(self, column):
"""Make sure a passed column is our column.
"""
if column != self.column and column.unspecialize() != self.column:
raise ValueError("Can't load unknown column %s" % column) | python | def _validate_input_column(self, column):
"""Make sure a passed column is our column.
"""
if column != self.column and column.unspecialize() != self.column:
raise ValueError("Can't load unknown column %s" % column) | [
"def",
"_validate_input_column",
"(",
"self",
",",
"column",
")",
":",
"if",
"column",
"!=",
"self",
".",
"column",
"and",
"column",
".",
"unspecialize",
"(",
")",
"!=",
"self",
".",
"column",
":",
"raise",
"ValueError",
"(",
"\"Can't load unknown column %s\"",
"%",
"column",
")"
] | Make sure a passed column is our column. | [
"Make",
"sure",
"a",
"passed",
"column",
"is",
"our",
"column",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/frame.py#L183-L187 |
25,830 | quantopian/zipline | zipline/utils/security_list.py | load_from_directory | def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data | python | def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data | [
"def",
"load_from_directory",
"(",
"list_name",
")",
":",
"data",
"=",
"{",
"}",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"SECURITY_LISTS_DIR",
",",
"list_name",
")",
"for",
"kd_name",
"in",
"listdir",
"(",
"dir_path",
")",
":",
"kd",
"=",
"datetime",
".",
"strptime",
"(",
"kd_name",
",",
"DATE_FORMAT",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
"data",
"[",
"kd",
"]",
"=",
"{",
"}",
"kd_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"kd_name",
")",
"for",
"ld_name",
"in",
"listdir",
"(",
"kd_path",
")",
":",
"ld",
"=",
"datetime",
".",
"strptime",
"(",
"ld_name",
",",
"DATE_FORMAT",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
"data",
"[",
"kd",
"]",
"[",
"ld",
"]",
"=",
"{",
"}",
"ld_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"kd_path",
",",
"ld_name",
")",
"for",
"fname",
"in",
"listdir",
"(",
"ld_path",
")",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"ld_path",
",",
"fname",
")",
"with",
"open",
"(",
"fpath",
")",
"as",
"f",
":",
"symbols",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"data",
"[",
"kd",
"]",
"[",
"ld",
"]",
"[",
"fname",
"]",
"=",
"symbols",
"return",
"data"
] | To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{'add': [symbol list], 'delete': [symbol list]} | [
"To",
"resolve",
"the",
"symbol",
"in",
"the",
"LEVERAGED_ETF",
"list",
"the",
"date",
"on",
"which",
"the",
"symbol",
"was",
"in",
"effect",
"is",
"needed",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/security_list.py#L123-L159 |
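A sketch of the on-disk layout the loader expects, built in a temp directory; note that the real function reads from the fixed SECURITY_LISTS_DIR, and the knowledge/lookup date format is assumed here to be %Y%m%d.

import os
import tempfile

root = tempfile.mkdtemp()
ld_path = os.path.join(root, 'leveraged_etf_list', '20140101', '20140102')
os.makedirs(ld_path)
with open(os.path.join(ld_path, 'add.txt'), 'w') as f:
    f.write('AAPL\nMSFT\n')
# Pointed at this tree, the loader would return
# {knowledge_date: {lookup_date: {'add.txt': ['AAPL', 'MSFT']}}},
# keyed by whatever filenames it finds.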
25,831 | quantopian/zipline | zipline/utils/memoize.py | weak_lru_cache | def weak_lru_cache(maxsize=100):
"""Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
class desc(lazyval):
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
inst = ref(instance)
@_weak_lru_cache(maxsize)
@wraps(self._get)
def wrapper(*args, **kwargs):
return self._get(inst(), *args, **kwargs)
self._cache[instance] = wrapper
return wrapper
@_weak_lru_cache(maxsize)
def __call__(self, *args, **kwargs):
return self._get(*args, **kwargs)
return desc | python | def weak_lru_cache(maxsize=100):
"""Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
class desc(lazyval):
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
inst = ref(instance)
@_weak_lru_cache(maxsize)
@wraps(self._get)
def wrapper(*args, **kwargs):
return self._get(inst(), *args, **kwargs)
self._cache[instance] = wrapper
return wrapper
@_weak_lru_cache(maxsize)
def __call__(self, *args, **kwargs):
return self._get(*args, **kwargs)
return desc | [
"def",
"weak_lru_cache",
"(",
"maxsize",
"=",
"100",
")",
":",
"class",
"desc",
"(",
"lazyval",
")",
":",
"def",
"__get__",
"(",
"self",
",",
"instance",
",",
"owner",
")",
":",
"if",
"instance",
"is",
"None",
":",
"return",
"self",
"try",
":",
"return",
"self",
".",
"_cache",
"[",
"instance",
"]",
"except",
"KeyError",
":",
"inst",
"=",
"ref",
"(",
"instance",
")",
"@",
"_weak_lru_cache",
"(",
"maxsize",
")",
"@",
"wraps",
"(",
"self",
".",
"_get",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get",
"(",
"inst",
"(",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_cache",
"[",
"instance",
"]",
"=",
"wrapper",
"return",
"wrapper",
"@",
"_weak_lru_cache",
"(",
"maxsize",
")",
"def",
"__call__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"desc"
] | Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used | [
"Weak",
"least",
"-",
"recently",
"-",
"used",
"cache",
"decorator",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/memoize.py#L211-L248 |
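A hedged usage sketch, assuming zipline is installed; the decorated method caches per instance, and entries are dropped once the instance is garbage collected.

from zipline.utils.memoize import weak_lru_cache

class Loader(object):
    @weak_lru_cache(maxsize=8)
    def load(self, key):
        print('computing', key)
        return key * 2

loader = Loader()
loader.load(3)  # prints 'computing 3'
loader.load(3)  # served from the per-instance cache; nothing printed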
25,832 | quantopian/zipline | zipline/pipeline/data/dataset.py | Column.bind | def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
) | python | def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
) | [
"def",
"bind",
"(",
"self",
",",
"name",
")",
":",
"return",
"_BoundColumnDescr",
"(",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"name",
"=",
"name",
",",
"doc",
"=",
"self",
".",
"doc",
",",
"metadata",
"=",
"self",
".",
"metadata",
",",
")"
] | Bind a `Column` object to its name. | [
"Bind",
"a",
"Column",
"object",
"to",
"its",
"name",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L49-L59 |
25,833 | quantopian/zipline | zipline/pipeline/data/dataset.py | BoundColumn.specialize | def specialize(self, domain):
"""Specialize ``self`` to a concrete domain.
"""
if domain == self.domain:
return self
return type(self)(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset.specialize(domain),
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
) | python | def specialize(self, domain):
"""Specialize ``self`` to a concrete domain.
"""
if domain == self.domain:
return self
return type(self)(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset.specialize(domain),
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
) | [
"def",
"specialize",
"(",
"self",
",",
"domain",
")",
":",
"if",
"domain",
"==",
"self",
".",
"domain",
":",
"return",
"self",
"return",
"type",
"(",
"self",
")",
"(",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"dataset",
"=",
"self",
".",
"_dataset",
".",
"specialize",
"(",
"domain",
")",
",",
"name",
"=",
"self",
".",
"_name",
",",
"doc",
"=",
"self",
".",
"__doc__",
",",
"metadata",
"=",
"self",
".",
"_metadata",
",",
")"
] | Specialize ``self`` to a concrete domain. | [
"Specialize",
"self",
"to",
"a",
"concrete",
"domain",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L177-L190 |
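
A usage sketch for `specialize`; `EquityPricing` and the `US_EQUITIES` domain are zipline built-ins at this sha, though the exact import paths are an assumption:

```python
# Pin a generic pricing column to the US-equities domain.
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.domain import US_EQUITIES

us_close = EquityPricing.close.specialize(US_EQUITIES)
# Re-specializing to the column's current domain is a no-op, per the
# early return in the method above.
assert us_close.specialize(US_EQUITIES) is us_close
```
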
25,834 | quantopian/zipline | zipline/pipeline/data/dataset.py | DataSet.get_column | def get_column(cls, name):
"""Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
"""
clsdict = vars(cls)
try:
maybe_column = clsdict[name]
if not isinstance(maybe_column, _BoundColumnDescr):
raise KeyError(name)
except KeyError:
raise AttributeError(
"{dset} has no column {colname!r}:\n\n"
"Possible choices are:\n"
"{choices}".format(
dset=cls.qualname,
colname=name,
choices=bulleted_list(
sorted(cls._column_names),
max_count=10,
),
)
)
# Resolve column descriptor into a BoundColumn.
return maybe_column.__get__(None, cls) | python | def get_column(cls, name):
"""Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
"""
clsdict = vars(cls)
try:
maybe_column = clsdict[name]
if not isinstance(maybe_column, _BoundColumnDescr):
raise KeyError(name)
except KeyError:
raise AttributeError(
"{dset} has no column {colname!r}:\n\n"
"Possible choices are:\n"
"{choices}".format(
dset=cls.qualname,
colname=name,
choices=bulleted_list(
sorted(cls._column_names),
max_count=10,
),
)
)
# Resolve column descriptor into a BoundColumn.
return maybe_column.__get__(None, cls) | [
"def",
"get_column",
"(",
"cls",
",",
"name",
")",
":",
"clsdict",
"=",
"vars",
"(",
"cls",
")",
"try",
":",
"maybe_column",
"=",
"clsdict",
"[",
"name",
"]",
"if",
"not",
"isinstance",
"(",
"maybe_column",
",",
"_BoundColumnDescr",
")",
":",
"raise",
"KeyError",
"(",
"name",
")",
"except",
"KeyError",
":",
"raise",
"AttributeError",
"(",
"\"{dset} has no column {colname!r}:\\n\\n\"",
"\"Possible choices are:\\n\"",
"\"{choices}\"",
".",
"format",
"(",
"dset",
"=",
"cls",
".",
"qualname",
",",
"colname",
"=",
"name",
",",
"choices",
"=",
"bulleted_list",
"(",
"sorted",
"(",
"cls",
".",
"_column_names",
")",
",",
"max_count",
"=",
"10",
",",
")",
",",
")",
")",
"# Resolve column descriptor into a BoundColumn.",
"return",
"maybe_column",
".",
"__get__",
"(",
"None",
",",
"cls",
")"
] | Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists. | [
"Look",
"up",
"a",
"column",
"by",
"name",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L502-L540 |
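
A usage sketch for `get_column`, handy when column names arrive as strings (e.g. from config); the built-in `EquityPricing` dataset is assumed:

```python
from zipline.pipeline.data import EquityPricing

close = EquityPricing.get_column('close')  # same column as EquityPricing.close

try:
    EquityPricing.get_column('not_a_column')
except AttributeError as e:
    print(e)  # message lists the dataset's real columns under "Possible choices"
```
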
25,835 | quantopian/zipline | zipline/pipeline/data/dataset.py | DataSetFamily._make_dataset | def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates.
"""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = '%s.slice(%s)' % (
cls.__name__,
', '.join('%s=%r' % item for item in coords.items()),
)
return Slice | python | def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates.
"""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = '%s.slice(%s)' % (
cls.__name__,
', '.join('%s=%r' % item for item in coords.items()),
)
return Slice | [
"def",
"_make_dataset",
"(",
"cls",
",",
"coords",
")",
":",
"class",
"Slice",
"(",
"cls",
".",
"_SliceType",
")",
":",
"extra_coords",
"=",
"coords",
"Slice",
".",
"__name__",
"=",
"'%s.slice(%s)'",
"%",
"(",
"cls",
".",
"__name__",
",",
"', '",
".",
"join",
"(",
"'%s=%r'",
"%",
"item",
"for",
"item",
"in",
"coords",
".",
"items",
"(",
")",
")",
",",
")",
"return",
"Slice"
] | Construct a new dataset given the coordinates. | [
"Construct",
"a",
"new",
"dataset",
"given",
"the",
"coordinates",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L813-L823 |
25,836 | quantopian/zipline | zipline/pipeline/data/dataset.py | DataSetFamily.slice | def slice(cls, *args, **kwargs):
"""Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
"""
coords, hash_key = cls._canonical_key(args, kwargs)
try:
return cls._slice_cache[hash_key]
except KeyError:
pass
Slice = cls._make_dataset(coords)
cls._slice_cache[hash_key] = Slice
return Slice | python | def slice(cls, *args, **kwargs):
"""Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
"""
coords, hash_key = cls._canonical_key(args, kwargs)
try:
return cls._slice_cache[hash_key]
except KeyError:
pass
Slice = cls._make_dataset(coords)
cls._slice_cache[hash_key] = Slice
return Slice | [
"def",
"slice",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"coords",
",",
"hash_key",
"=",
"cls",
".",
"_canonical_key",
"(",
"args",
",",
"kwargs",
")",
"try",
":",
"return",
"cls",
".",
"_slice_cache",
"[",
"hash_key",
"]",
"except",
"KeyError",
":",
"pass",
"Slice",
"=",
"cls",
".",
"_make_dataset",
"(",
"coords",
")",
"cls",
".",
"_slice_cache",
"[",
"hash_key",
"]",
"=",
"Slice",
"return",
"Slice"
] | Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute. | [
"Take",
"a",
"slice",
"of",
"a",
"DataSetFamily",
"to",
"produce",
"a",
"dataset",
"indexed",
"by",
"asset",
"and",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L826-L854 |
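
A sketch of the `slice` workflow with an invented family (`Fundamentals` and its dimension are made up; the `extra_dims` declaration format follows zipline's `DataSetFamily` docs):

```python
from zipline.pipeline.data import Column, DataSetFamily
import numpy as np

class Fundamentals(DataSetFamily):
    extra_dims = [
        ('report_type', {'estimate', 'actual'}),
    ]
    revenue = Column(np.float64)

actuals = Fundamentals.slice(report_type='actual')
# The coordinates used to build the slice are exposed, per the Notes above.
assert actuals.extra_coords == {'report_type': 'actual'}
# The _slice_cache makes repeated slices cheap and identical:
assert Fundamentals.slice(report_type='actual') is actuals
```
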
25,837 | quantopian/zipline | zipline/pipeline/loaders/synthetic.py | PrecomputedLoader.load_adjusted_array | def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders.get(col)
if loader is None:
loader = self._loaders[col.unspecialize()]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(
loader.load_adjusted_array(domain, [col], dates, sids, mask)
)
return out | python | def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders.get(col)
if loader is None:
loader = self._loaders[col.unspecialize()]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(
loader.load_adjusted_array(domain, [col], dates, sids, mask)
)
return out | [
"def",
"load_adjusted_array",
"(",
"self",
",",
"domain",
",",
"columns",
",",
"dates",
",",
"sids",
",",
"mask",
")",
":",
"out",
"=",
"{",
"}",
"for",
"col",
"in",
"columns",
":",
"try",
":",
"loader",
"=",
"self",
".",
"_loaders",
".",
"get",
"(",
"col",
")",
"if",
"loader",
"is",
"None",
":",
"loader",
"=",
"self",
".",
"_loaders",
"[",
"col",
".",
"unspecialize",
"(",
")",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find loader for %s\"",
"%",
"col",
")",
"out",
".",
"update",
"(",
"loader",
".",
"load_adjusted_array",
"(",
"domain",
",",
"[",
"col",
"]",
",",
"dates",
",",
"sids",
",",
"mask",
")",
")",
"return",
"out"
] | Load by delegating to sub-loaders. | [
"Load",
"by",
"delegating",
"to",
"sub",
"-",
"loaders",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L82-L97 |
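
A self-contained sketch of the two-step lookup above, with strings standing in for `BoundColumn` objects and `'@'` faking the domain suffix:

```python
# Try the column as given, then fall back to its "unspecialized" form.
def find_loader(column, loaders):
    try:
        loader = loaders.get(column)
        if loader is None:
            loader = loaders[column.split('@')[0]]  # crude stand-in for unspecialize()
    except KeyError:
        raise ValueError("Couldn't find loader for %s" % column)
    return loader

loaders = {'close@US': 'us_close_loader', 'volume': 'generic_volume_loader'}
assert find_loader('close@US', loaders) == 'us_close_loader'          # exact hit
assert find_loader('volume@US', loaders) == 'generic_volume_loader'   # fallback
```
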
25,838 | quantopian/zipline | zipline/pipeline/loaders/synthetic.py | SeededRandomLoader._float_values | def _float_values(self, shape):
"""
Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape) | python | def _float_values(self, shape):
"""
Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape) | [
"def",
"_float_values",
"(",
"self",
",",
"shape",
")",
":",
"return",
"self",
".",
"state",
".",
"uniform",
"(",
"low",
"=",
"0.0",
",",
"high",
"=",
"100.0",
",",
"size",
"=",
"shape",
")"
] | Return uniformly-distributed floats between 0.0 and 100.0. | [
"Return",
"uniformly",
"-",
"distributed",
"floats",
"between",
"0",
".",
"0",
"and",
"100",
".",
"0",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L170-L174 |
25,839 | quantopian/zipline | zipline/pipeline/loaders/synthetic.py | SeededRandomLoader._int_values | def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return (self.state.randint(low=0, high=100, size=shape)
.astype('int64')) | python | def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return (self.state.randint(low=0, high=100, size=shape)
.astype('int64')) | [
"def",
"_int_values",
"(",
"self",
",",
"shape",
")",
":",
"return",
"(",
"self",
".",
"state",
".",
"randint",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"100",
",",
"size",
"=",
"shape",
")",
".",
"astype",
"(",
"'int64'",
")",
")"
] | Return uniformly-distributed integers between 0 and 100. | [
"Return",
"uniformly",
"-",
"distributed",
"integers",
"between",
"0",
"and",
"100",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L176-L181 |
25,840 | quantopian/zipline | zipline/pipeline/loaders/synthetic.py | SeededRandomLoader._datetime_values | def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp('2014', tz='UTC').asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype('timedelta64[D]')
return start + offsets | python | def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp('2014', tz='UTC').asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype('timedelta64[D]')
return start + offsets | [
"def",
"_datetime_values",
"(",
"self",
",",
"shape",
")",
":",
"start",
"=",
"Timestamp",
"(",
"'2014'",
",",
"tz",
"=",
"'UTC'",
")",
".",
"asm8",
"offsets",
"=",
"self",
".",
"state",
".",
"randint",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"364",
",",
"size",
"=",
"shape",
",",
")",
".",
"astype",
"(",
"'timedelta64[D]'",
")",
"return",
"start",
"+",
"offsets"
] | Return uniformly-distributed dates in 2014. | [
"Return",
"uniformly",
"-",
"distributed",
"dates",
"in",
"2014",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L183-L193 |
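
The three generators (`_float_values`, `_int_values`, `_datetime_values`) reduce to plain numpy calls; a deterministic reproduction, approximating the tz-aware `Timestamp` start with a bare `datetime64`:

```python
import numpy as np

# A fixed seed makes the "random" data deterministic, which is the point
# of SeededRandomLoader.
state = np.random.RandomState(seed=42)
shape = (3, 4)

floats = state.uniform(low=0.0, high=100.0, size=shape)             # [0, 100)
ints = state.randint(low=0, high=100, size=shape).astype('int64')   # [0, 100)
dates = (np.datetime64('2014-01-01')
         + state.randint(0, 364, size=shape).astype('timedelta64[D]'))
```
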
25,841 | quantopian/zipline | zipline/lib/quantiles.py | quantiles | def quantiles(data, nbins_or_partition_bounds):
"""
Compute rowwise array quantiles on an input.
"""
return apply_along_axis(
qcut,
1,
data,
q=nbins_or_partition_bounds, labels=False,
) | python | def quantiles(data, nbins_or_partition_bounds):
"""
Compute rowwise array quantiles on an input.
"""
return apply_along_axis(
qcut,
1,
data,
q=nbins_or_partition_bounds, labels=False,
) | [
"def",
"quantiles",
"(",
"data",
",",
"nbins_or_partition_bounds",
")",
":",
"return",
"apply_along_axis",
"(",
"qcut",
",",
"1",
",",
"data",
",",
"q",
"=",
"nbins_or_partition_bounds",
",",
"labels",
"=",
"False",
",",
")"
] | Compute rowwise array quantiles on an input. | [
"Compute",
"rowwise",
"array",
"quantiles",
"on",
"an",
"input",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/quantiles.py#L8-L17 |
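
The same rowwise trick outside zipline, assuming only numpy and pandas:

```python
import numpy as np
import pandas as pd

# pandas.qcut labels each value in a 1-D slice with its quantile bin, and
# apply_along_axis maps that over every row of the 2-D input.
data = np.random.RandomState(0).randn(4, 8)
labels = np.apply_along_axis(pd.qcut, 1, data, q=4, labels=False)
print(labels)  # one integer bin in {0, 1, 2, 3} per input cell
```
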
25,842 | quantopian/zipline | zipline/finance/metrics/tracker.py | MetricsTracker.handle_minute_close | def handle_minute_close(self, dt, data_portal):
"""
Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet.
"""
self.sync_last_sale_prices(dt, data_portal)
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'minute_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_bar(self._session_count)
self.end_of_bar(
packet,
ledger,
dt,
self._session_count,
data_portal,
)
return packet | python | def handle_minute_close(self, dt, data_portal):
"""
Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet.
"""
self.sync_last_sale_prices(dt, data_portal)
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'minute_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_bar(self._session_count)
self.end_of_bar(
packet,
ledger,
dt,
self._session_count,
data_portal,
)
return packet | [
"def",
"handle_minute_close",
"(",
"self",
",",
"dt",
",",
"data_portal",
")",
":",
"self",
".",
"sync_last_sale_prices",
"(",
"dt",
",",
"data_portal",
")",
"packet",
"=",
"{",
"'period_start'",
":",
"self",
".",
"_first_session",
",",
"'period_end'",
":",
"self",
".",
"_last_session",
",",
"'capital_base'",
":",
"self",
".",
"_capital_base",
",",
"'minute_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_market_open",
",",
"'period_close'",
":",
"dt",
",",
"}",
",",
"'cumulative_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_first_session",
",",
"'period_close'",
":",
"self",
".",
"_last_session",
",",
"}",
",",
"'progress'",
":",
"self",
".",
"_progress",
"(",
"self",
")",
",",
"'cumulative_risk_metrics'",
":",
"{",
"}",
",",
"}",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"end_of_bar",
"(",
"self",
".",
"_session_count",
")",
"self",
".",
"end_of_bar",
"(",
"packet",
",",
"ledger",
",",
"dt",
",",
"self",
".",
"_session_count",
",",
"data_portal",
",",
")",
"return",
"packet"
] | Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet. | [
"Handles",
"the",
"close",
"of",
"the",
"given",
"minute",
"in",
"minute",
"emission",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L204-L243 |
25,843 | quantopian/zipline | zipline/finance/metrics/tracker.py | MetricsTracker.handle_market_open | def handle_market_open(self, session_label, data_portal):
"""Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal.
"""
ledger = self._ledger
ledger.start_of_session(session_label)
adjustment_reader = data_portal.adjustment_reader
if adjustment_reader is not None:
# this is None when running with a dataframe source
ledger.process_dividends(
session_label,
self._asset_finder,
adjustment_reader,
)
self._current_session = session_label
cal = self._trading_calendar
self._market_open, self._market_close = self._execution_open_and_close(
cal,
session_label,
)
self.start_of_session(ledger, session_label, data_portal) | python | def handle_market_open(self, session_label, data_portal):
"""Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal.
"""
ledger = self._ledger
ledger.start_of_session(session_label)
adjustment_reader = data_portal.adjustment_reader
if adjustment_reader is not None:
# this is None when running with a dataframe source
ledger.process_dividends(
session_label,
self._asset_finder,
adjustment_reader,
)
self._current_session = session_label
cal = self._trading_calendar
self._market_open, self._market_close = self._execution_open_and_close(
cal,
session_label,
)
self.start_of_session(ledger, session_label, data_portal) | [
"def",
"handle_market_open",
"(",
"self",
",",
"session_label",
",",
"data_portal",
")",
":",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"start_of_session",
"(",
"session_label",
")",
"adjustment_reader",
"=",
"data_portal",
".",
"adjustment_reader",
"if",
"adjustment_reader",
"is",
"not",
"None",
":",
"# this is None when running with a dataframe source",
"ledger",
".",
"process_dividends",
"(",
"session_label",
",",
"self",
".",
"_asset_finder",
",",
"adjustment_reader",
",",
")",
"self",
".",
"_current_session",
"=",
"session_label",
"cal",
"=",
"self",
".",
"_trading_calendar",
"self",
".",
"_market_open",
",",
"self",
".",
"_market_close",
"=",
"self",
".",
"_execution_open_and_close",
"(",
"cal",
",",
"session_label",
",",
")",
"self",
".",
"start_of_session",
"(",
"ledger",
",",
"session_label",
",",
"data_portal",
")"
] | Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal. | [
"Handles",
"the",
"start",
"of",
"each",
"session",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L245-L275 |
25,844 | quantopian/zipline | zipline/finance/metrics/tracker.py | MetricsTracker.handle_market_close | def handle_market_close(self, dt, data_portal):
"""Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == 'daily':
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.sync_last_sale_prices(dt, data_portal)
session_ix = self._session_count
# increment the day counter before we move markers forward.
self._session_count += 1
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'daily_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_session(session_ix)
self.end_of_session(
packet,
ledger,
completed_session,
session_ix,
data_portal,
)
return packet | python | def handle_market_close(self, dt, data_portal):
"""Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == 'daily':
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.sync_last_sale_prices(dt, data_portal)
session_ix = self._session_count
# increment the day counter before we move markers forward.
self._session_count += 1
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'daily_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_session(session_ix)
self.end_of_session(
packet,
ledger,
completed_session,
session_ix,
data_portal,
)
return packet | [
"def",
"handle_market_close",
"(",
"self",
",",
"dt",
",",
"data_portal",
")",
":",
"completed_session",
"=",
"self",
".",
"_current_session",
"if",
"self",
".",
"emission_rate",
"==",
"'daily'",
":",
"# this method is called for both minutely and daily emissions, but",
"# this chunk of code here only applies for daily emissions. (since",
"# it's done every minute, elsewhere, for minutely emission).",
"self",
".",
"sync_last_sale_prices",
"(",
"dt",
",",
"data_portal",
")",
"session_ix",
"=",
"self",
".",
"_session_count",
"# increment the day counter before we move markers forward.",
"self",
".",
"_session_count",
"+=",
"1",
"packet",
"=",
"{",
"'period_start'",
":",
"self",
".",
"_first_session",
",",
"'period_end'",
":",
"self",
".",
"_last_session",
",",
"'capital_base'",
":",
"self",
".",
"_capital_base",
",",
"'daily_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_market_open",
",",
"'period_close'",
":",
"dt",
",",
"}",
",",
"'cumulative_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_first_session",
",",
"'period_close'",
":",
"self",
".",
"_last_session",
",",
"}",
",",
"'progress'",
":",
"self",
".",
"_progress",
"(",
"self",
")",
",",
"'cumulative_risk_metrics'",
":",
"{",
"}",
",",
"}",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"end_of_session",
"(",
"session_ix",
")",
"self",
".",
"end_of_session",
"(",
"packet",
",",
"ledger",
",",
"completed_session",
",",
"session_ix",
",",
"data_portal",
",",
")",
"return",
"packet"
] | Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet. | [
"Handles",
"the",
"close",
"of",
"the",
"given",
"day",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L277-L328 |
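
A runnable sketch of the perf-packet skeleton shared by `handle_minute_close` and `handle_market_close`; the two differ only in which perf key they carry, and the helper name below is invented:

```python
def packet_skeleton(emission_rate, period_open, period_close):
    perf_key = 'daily_perf' if emission_rate == 'daily' else 'minute_perf'
    return {
        perf_key: {'period_open': period_open, 'period_close': period_close},
        'cumulative_perf': {},
        'cumulative_risk_metrics': {},
    }

p = packet_skeleton('daily', '2014-01-02 14:31', '2014-01-02 21:00')
assert 'daily_perf' in p and 'minute_perf' not in p
```
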
25,845 | quantopian/zipline | zipline/finance/metrics/tracker.py | MetricsTracker.handle_simulation_end | def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet | python | def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet | [
"def",
"handle_simulation_end",
"(",
"self",
",",
"data_portal",
")",
":",
"log",
".",
"info",
"(",
"'Simulated {} trading days\\n'",
"'first open: {}\\n'",
"'last close: {}'",
",",
"self",
".",
"_session_count",
",",
"self",
".",
"_trading_calendar",
".",
"session_open",
"(",
"self",
".",
"_first_session",
")",
",",
"self",
".",
"_trading_calendar",
".",
"session_close",
"(",
"self",
".",
"_last_session",
")",
",",
")",
"packet",
"=",
"{",
"}",
"self",
".",
"end_of_simulation",
"(",
"packet",
",",
"self",
".",
"_ledger",
",",
"self",
".",
"_trading_calendar",
",",
"self",
".",
"_sessions",
",",
"data_portal",
",",
"self",
".",
"_benchmark_source",
",",
")",
"return",
"packet"
] | When the simulation is complete, run the full period risk report
and send it out on the results socket. | [
"When",
"the",
"simulation",
"is",
"complete",
"run",
"the",
"full",
"period",
"risk",
"report",
"and",
"send",
"it",
"out",
"on",
"the",
"results",
"socket",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L330-L353 |
25,846 | quantopian/zipline | zipline/extensions.py | create_args | def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name]) | python | def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name]) | [
"def",
"create_args",
"(",
"args",
",",
"root",
")",
":",
"extension_args",
"=",
"{",
"}",
"for",
"arg",
"in",
"args",
":",
"parse_extension_arg",
"(",
"arg",
",",
"extension_args",
")",
"for",
"name",
"in",
"sorted",
"(",
"extension_args",
",",
"key",
"=",
"len",
")",
":",
"path",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"update_namespace",
"(",
"root",
",",
"path",
",",
"extension_args",
"[",
"name",
"]",
")"
] | Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree | [
"Encapsulates",
"a",
"set",
"of",
"custom",
"command",
"line",
"arguments",
"in",
"key",
"=",
"value",
"or",
"key",
".",
"namespace",
"=",
"value",
"form",
"into",
"a",
"chain",
"of",
"Namespace",
"objects",
"where",
"each",
"next",
"level",
"is",
"an",
"attribute",
"of",
"the",
"Namespace",
"object",
"on",
"the",
"current",
"level"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L6-L28 |
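
A usage sketch; `Namespace` here is the placeholder class referenced by `update_namespace` below, assumed importable from the same module:

```python
from zipline.extensions import create_args, Namespace

root = Namespace()
create_args(['plain=1', 'a.b=2', 'a.c=3'], root)
assert root.plain == '1'                    # values stay strings
assert root.a.b == '2' and root.a.c == '3'  # dotted keys become nested attrs
```
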
25,847 | quantopian/zipline | zipline/extensions.py | parse_extension_arg | def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
"""
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value | python | def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
"""
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value | [
"def",
"parse_extension_arg",
"(",
"arg",
",",
"arg_dict",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^(([^\\d\\W]\\w*)(\\.[^\\d\\W]\\w*)*)=(.*)$'",
",",
"arg",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"invalid extension argument '%s', must be in key=value form\"",
"%",
"arg",
")",
"name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"value",
"=",
"match",
".",
"group",
"(",
"4",
")",
"arg_dict",
"[",
"name",
"]",
"=",
"value"
] | Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added | [
"Converts",
"argument",
"strings",
"in",
"key",
"=",
"value",
"or",
"key",
".",
"namespace",
"=",
"value",
"form",
"to",
"dictionary",
"entries"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L31-L53 |
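
The regex accepts dotted identifiers left of the first `=` and anything (including more `=`) on the right; a quick demonstration:

```python
from zipline.extensions import parse_extension_arg

args = {}
parse_extension_arg('broker.api_key=abc=123', args)  # value may contain '='
assert args == {'broker.api_key': 'abc=123'}

try:
    parse_extension_arg('no-equals-sign', args)
except ValueError as e:
    print(e)  # invalid extension argument ... must be in key=value form
```
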
25,848 | quantopian/zipline | zipline/extensions.py | update_namespace | def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name) | python | def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name) | [
"def",
"update_namespace",
"(",
"namespace",
",",
"path",
",",
"name",
")",
":",
"if",
"len",
"(",
"path",
")",
"==",
"1",
":",
"setattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
",",
"name",
")",
"else",
":",
"if",
"hasattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
")",
":",
"if",
"isinstance",
"(",
"getattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
")",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"Conflicting assignments at namespace\"",
"\" level '%s'\"",
"%",
"path",
"[",
"0",
"]",
")",
"else",
":",
"a",
"=",
"Namespace",
"(",
")",
"setattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
",",
"a",
")",
"update_namespace",
"(",
"getattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
")",
",",
"path",
"[",
"1",
":",
"]",
",",
"name",
")"
] | A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level | [
"A",
"recursive",
"function",
"that",
"takes",
"a",
"root",
"element",
"list",
"of",
"namespaces",
"and",
"the",
"value",
"being",
"stored",
"and",
"assigns",
"namespaces",
"to",
"the",
"root",
"object",
"via",
"a",
"chain",
"of",
"Namespace",
"objects",
"connected",
"through",
"attributes"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L56-L83 |
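
The conflict check in action; `Namespace` is assumed importable from the same module:

```python
from zipline.extensions import update_namespace, Namespace

root = Namespace()
update_namespace(root, ['a', 'b'], 'leaf')
assert root.a.b == 'leaf'

# Once a path leaf holds a string, it cannot also serve as a deeper
# namespace level.
try:
    update_namespace(root, ['a', 'b', 'c'], 'deeper')
except ValueError as e:
    print(e)  # Conflicting assignments at namespace level 'b'
```
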
25,849 | quantopian/zipline | zipline/extensions.py | create_registry | def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface | python | def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface | [
"def",
"create_registry",
"(",
"interface",
")",
":",
"if",
"interface",
"in",
"custom_types",
":",
"raise",
"ValueError",
"(",
"'there is already a Registry instance '",
"'for the specified type'",
")",
"custom_types",
"[",
"interface",
"]",
"=",
"Registry",
"(",
"interface",
")",
"return",
"interface"
] | Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered. | [
"Create",
"a",
"new",
"registry",
"for",
"an",
"extensible",
"interface",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L244-L263 |
25,850 | quantopian/zipline | zipline/extensions.py | Registry.load | def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) | python | def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) | [
"def",
"load",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"self",
".",
"_factories",
"[",
"name",
"]",
"(",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"no %s factory registered under name %r, options are: %r\"",
"%",
"(",
"self",
".",
"interface",
".",
"__name__",
",",
"name",
",",
"sorted",
"(",
"self",
".",
"_factories",
")",
")",
",",
")"
] | Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered. | [
"Construct",
"an",
"object",
"from",
"a",
"registered",
"factory",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L110-L124 |
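
A sketch of `load`'s error contract on an empty registry; `Registry`'s constructor is not shown here, so the assumption is that `Registry(interface)` starts with an empty factory map, as the attribute usage implies:

```python
from zipline.extensions import Registry

class Greeter(object):
    pass

reg = Registry(Greeter)
try:
    reg.load('casual')
except ValueError as e:
    print(e)  # no Greeter factory registered under name 'casual', options are: []
```
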
25,851 | quantopian/zipline | zipline/finance/commission.py | PerDollar.calculate | def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share | python | def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share | [
"def",
"calculate",
"(",
"self",
",",
"order",
",",
"transaction",
")",
":",
"cost_per_share",
"=",
"transaction",
".",
"price",
"*",
"self",
".",
"cost_per_dollar",
"return",
"abs",
"(",
"transaction",
".",
"amount",
")",
"*",
"cost_per_share"
] | Pay commission based on dollar value of shares. | [
"Pay",
"commission",
"based",
"on",
"dollar",
"value",
"of",
"shares",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/commission.py#L364-L369 |
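
The commission arithmetic spelled out; 0.0015 is believed to be zipline's default per-dollar cost, but treat the exact figure as an assumption:

```python
cost_per_dollar = 0.0015
price, amount = 10.0, -100              # selling 100 shares at $10.00
commission = abs(amount) * price * cost_per_dollar
assert commission == 1.5                # $1.50 on $1,000 of notional
```
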
25,852 | quantopian/zipline | zipline/finance/metrics/metric.py | _ClassicRiskMetrics.risk_metric_period | def risk_metric_period(cls,
start_session,
end_session,
algorithm_returns,
benchmark_returns,
algorithm_leverages):
"""
Creates a dictionary representing the state of the risk report.
Parameters
----------
start_session : pd.Timestamp
Start of period (inclusive) to produce metrics on
end_session : pd.Timestamp
End of period (inclusive) to produce metrics on
algorithm_returns : pd.Series(pd.Timestamp -> float)
Series of algorithm returns as of the end of each session
benchmark_returns : pd.Series(pd.Timestamp -> float)
Series of benchmark returns as of the end of each session
algorithm_leverages : pd.Series(pd.Timestamp -> float)
Series of algorithm leverages as of the end of each session
Returns
-------
risk_metric : dict[str, any]
Dict of metrics that with fields like:
{
'algorithm_period_return': 0.0,
'benchmark_period_return': 0.0,
'treasury_period_return': 0,
'excess_return': 0.0,
'alpha': 0.0,
'beta': 0.0,
'sharpe': 0.0,
'sortino': 0.0,
'period_label': '1970-01',
'trading_days': 0,
'algo_volatility': 0.0,
'benchmark_volatility': 0.0,
'max_drawdown': 0.0,
'max_leverage': 0.0,
}
"""
algorithm_returns = algorithm_returns[
(algorithm_returns.index >= start_session) &
(algorithm_returns.index <= end_session)
]
# Benchmark needs to be masked to the same dates as the algo returns
benchmark_returns = benchmark_returns[
(benchmark_returns.index >= start_session) &
(benchmark_returns.index <= algorithm_returns.index[-1])
]
benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]
alpha, beta = ep.alpha_beta_aligned(
algorithm_returns.values,
benchmark_returns.values,
)
sharpe = ep.sharpe_ratio(algorithm_returns)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(sharpe):
sharpe = 0.0
sortino = ep.sortino_ratio(
algorithm_returns.values,
_downside_risk=ep.downside_risk(algorithm_returns.values),
)
rval = {
'algorithm_period_return': algorithm_period_returns,
'benchmark_period_return': benchmark_period_returns,
'treasury_period_return': 0,
'excess_return': algorithm_period_returns,
'alpha': alpha,
'beta': beta,
'sharpe': sharpe,
'sortino': sortino,
'period_label': end_session.strftime("%Y-%m"),
'trading_days': len(benchmark_returns),
'algo_volatility': ep.annual_volatility(algorithm_returns),
'benchmark_volatility': ep.annual_volatility(benchmark_returns),
'max_drawdown': ep.max_drawdown(algorithm_returns.values),
'max_leverage': algorithm_leverages.max(),
}
# check if a field in rval is nan or inf, and replace it with None
# except period_label which is always a str
return {
k: (
None
if k != 'period_label' and not np.isfinite(v) else
v
)
for k, v in iteritems(rval)
} | python | def risk_metric_period(cls,
start_session,
end_session,
algorithm_returns,
benchmark_returns,
algorithm_leverages):
"""
Creates a dictionary representing the state of the risk report.
Parameters
----------
start_session : pd.Timestamp
Start of period (inclusive) to produce metrics on
end_session : pd.Timestamp
End of period (inclusive) to produce metrics on
algorithm_returns : pd.Series(pd.Timestamp -> float)
Series of algorithm returns as of the end of each session
benchmark_returns : pd.Series(pd.Timestamp -> float)
Series of benchmark returns as of the end of each session
algorithm_leverages : pd.Series(pd.Timestamp -> float)
Series of algorithm leverages as of the end of each session
Returns
-------
risk_metric : dict[str, any]
Dict of metrics with fields like:
{
'algorithm_period_return': 0.0,
'benchmark_period_return': 0.0,
'treasury_period_return': 0,
'excess_return': 0.0,
'alpha': 0.0,
'beta': 0.0,
'sharpe': 0.0,
'sortino': 0.0,
'period_label': '1970-01',
'trading_days': 0,
'algo_volatility': 0.0,
'benchmark_volatility': 0.0,
'max_drawdown': 0.0,
'max_leverage': 0.0,
}
"""
algorithm_returns = algorithm_returns[
(algorithm_returns.index >= start_session) &
(algorithm_returns.index <= end_session)
]
# Benchmark needs to be masked to the same dates as the algo returns
benchmark_returns = benchmark_returns[
(benchmark_returns.index >= start_session) &
(benchmark_returns.index <= algorithm_returns.index[-1])
]
benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]
alpha, beta = ep.alpha_beta_aligned(
algorithm_returns.values,
benchmark_returns.values,
)
sharpe = ep.sharpe_ratio(algorithm_returns)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(sharpe):
sharpe = 0.0
sortino = ep.sortino_ratio(
algorithm_returns.values,
_downside_risk=ep.downside_risk(algorithm_returns.values),
)
rval = {
'algorithm_period_return': algorithm_period_returns,
'benchmark_period_return': benchmark_period_returns,
'treasury_period_return': 0,
'excess_return': algorithm_period_returns,
'alpha': alpha,
'beta': beta,
'sharpe': sharpe,
'sortino': sortino,
'period_label': end_session.strftime("%Y-%m"),
'trading_days': len(benchmark_returns),
'algo_volatility': ep.annual_volatility(algorithm_returns),
'benchmark_volatility': ep.annual_volatility(benchmark_returns),
'max_drawdown': ep.max_drawdown(algorithm_returns.values),
'max_leverage': algorithm_leverages.max(),
}
# check if a field in rval is nan or inf, and replace it with None
# except period_label which is always a str
return {
k: (
None
if k != 'period_label' and not np.isfinite(v) else
v
)
for k, v in iteritems(rval)
} | [
"def",
"risk_metric_period",
"(",
"cls",
",",
"start_session",
",",
"end_session",
",",
"algorithm_returns",
",",
"benchmark_returns",
",",
"algorithm_leverages",
")",
":",
"algorithm_returns",
"=",
"algorithm_returns",
"[",
"(",
"algorithm_returns",
".",
"index",
">=",
"start_session",
")",
"&",
"(",
"algorithm_returns",
".",
"index",
"<=",
"end_session",
")",
"]",
"# Benchmark needs to be masked to the same dates as the algo returns",
"benchmark_returns",
"=",
"benchmark_returns",
"[",
"(",
"benchmark_returns",
".",
"index",
">=",
"start_session",
")",
"&",
"(",
"benchmark_returns",
".",
"index",
"<=",
"algorithm_returns",
".",
"index",
"[",
"-",
"1",
"]",
")",
"]",
"benchmark_period_returns",
"=",
"ep",
".",
"cum_returns",
"(",
"benchmark_returns",
")",
".",
"iloc",
"[",
"-",
"1",
"]",
"algorithm_period_returns",
"=",
"ep",
".",
"cum_returns",
"(",
"algorithm_returns",
")",
".",
"iloc",
"[",
"-",
"1",
"]",
"alpha",
",",
"beta",
"=",
"ep",
".",
"alpha_beta_aligned",
"(",
"algorithm_returns",
".",
"values",
",",
"benchmark_returns",
".",
"values",
",",
")",
"sharpe",
"=",
"ep",
".",
"sharpe_ratio",
"(",
"algorithm_returns",
")",
"# The consumer currently expects a 0.0 value for sharpe in period,",
"# this differs from cumulative which was np.nan.",
"# When factoring out the sharpe_ratio, the different return types",
"# were collapsed into `np.nan`.",
"# TODO: Either fix consumer to accept `np.nan` or make the",
"# `sharpe_ratio` return type configurable.",
"# In the meantime, convert nan values to 0.0",
"if",
"pd",
".",
"isnull",
"(",
"sharpe",
")",
":",
"sharpe",
"=",
"0.0",
"sortino",
"=",
"ep",
".",
"sortino_ratio",
"(",
"algorithm_returns",
".",
"values",
",",
"_downside_risk",
"=",
"ep",
".",
"downside_risk",
"(",
"algorithm_returns",
".",
"values",
")",
",",
")",
"rval",
"=",
"{",
"'algorithm_period_return'",
":",
"algorithm_period_returns",
",",
"'benchmark_period_return'",
":",
"benchmark_period_returns",
",",
"'treasury_period_return'",
":",
"0",
",",
"'excess_return'",
":",
"algorithm_period_returns",
",",
"'alpha'",
":",
"alpha",
",",
"'beta'",
":",
"beta",
",",
"'sharpe'",
":",
"sharpe",
",",
"'sortino'",
":",
"sortino",
",",
"'period_label'",
":",
"end_session",
".",
"strftime",
"(",
"\"%Y-%m\"",
")",
",",
"'trading_days'",
":",
"len",
"(",
"benchmark_returns",
")",
",",
"'algo_volatility'",
":",
"ep",
".",
"annual_volatility",
"(",
"algorithm_returns",
")",
",",
"'benchmark_volatility'",
":",
"ep",
".",
"annual_volatility",
"(",
"benchmark_returns",
")",
",",
"'max_drawdown'",
":",
"ep",
".",
"max_drawdown",
"(",
"algorithm_returns",
".",
"values",
")",
",",
"'max_leverage'",
":",
"algorithm_leverages",
".",
"max",
"(",
")",
",",
"}",
"# check if a field in rval is nan or inf, and replace it with None",
"# except period_label which is always a str",
"return",
"{",
"k",
":",
"(",
"None",
"if",
"k",
"!=",
"'period_label'",
"and",
"not",
"np",
".",
"isfinite",
"(",
"v",
")",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"rval",
")",
"}"
] | Creates a dictionary representing the state of the risk report.
Parameters
----------
start_session : pd.Timestamp
Start of period (inclusive) to produce metrics on
end_session : pd.Timestamp
End of period (inclusive) to produce metrics on
algorithm_returns : pd.Series(pd.Timestamp -> float)
Series of algorithm returns as of the end of each session
benchmark_returns : pd.Series(pd.Timestamp -> float)
Series of benchmark returns as of the end of each session
algorithm_leverages : pd.Series(pd.Timestamp -> float)
Series of algorithm leverages as of the end of each session
Returns
-------
risk_metric : dict[str, any]
Dict of metrics with fields like:
{
'algorithm_period_return': 0.0,
'benchmark_period_return': 0.0,
'treasury_period_return': 0,
'excess_return': 0.0,
'alpha': 0.0,
'beta': 0.0,
'sharpe': 0.0,
'sortino': 0.0,
'period_label': '1970-01',
'trading_days': 0,
'algo_volatility': 0.0,
'benchmark_volatility': 0.0,
'max_drawdown': 0.0,
'max_leverage': 0.0,
} | [
"Creates",
"a",
"dictionary",
"representing",
"the",
"state",
"of",
"the",
"risk",
"report",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/metric.py#L559-L666 |
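
The same empyrical calls on a toy return series, including the nan-to-0.0 sharpe normalization performed above (empyrical is a zipline dependency):

```python
import empyrical as ep
import pandas as pd

idx = pd.date_range('2014-01-02', periods=5, tz='UTC')
algo = pd.Series([0.01, -0.02, 0.005, 0.0, 0.01], index=idx)
bench = pd.Series([0.005, -0.01, 0.0, 0.002, 0.008], index=idx)

period_return = ep.cum_returns(algo).iloc[-1]
alpha, beta = ep.alpha_beta_aligned(algo.values, bench.values)
sharpe = ep.sharpe_ratio(algo)
if pd.isnull(sharpe):       # mirror the consumer-facing nan -> 0.0 rule
    sharpe = 0.0
max_dd = ep.max_drawdown(algo.values)
```
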
25,853 | quantopian/zipline | zipline/assets/roll_finder.py | RollFinder._get_active_contract_at_offset | def _get_active_contract_at_offset(self, root_symbol, dt, offset):
"""
For the given root symbol, find the contract that is considered active
on a specific date at a specific offset.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
session = self.trading_calendar.minute_to_session_label(dt)
front = oc.contract_before_auto_close(session.value)
back = oc.contract_at_offset(front, 1, dt.value)
if back is None:
return front
primary = self._active_contract(oc, front, back, session)
return oc.contract_at_offset(primary, offset, session.value) | python | def _get_active_contract_at_offset(self, root_symbol, dt, offset):
"""
For the given root symbol, find the contract that is considered active
on a specific date at a specific offset.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
session = self.trading_calendar.minute_to_session_label(dt)
front = oc.contract_before_auto_close(session.value)
back = oc.contract_at_offset(front, 1, dt.value)
if back is None:
return front
primary = self._active_contract(oc, front, back, session)
return oc.contract_at_offset(primary, offset, session.value) | [
"def",
"_get_active_contract_at_offset",
"(",
"self",
",",
"root_symbol",
",",
"dt",
",",
"offset",
")",
":",
"oc",
"=",
"self",
".",
"asset_finder",
".",
"get_ordered_contracts",
"(",
"root_symbol",
")",
"session",
"=",
"self",
".",
"trading_calendar",
".",
"minute_to_session_label",
"(",
"dt",
")",
"front",
"=",
"oc",
".",
"contract_before_auto_close",
"(",
"session",
".",
"value",
")",
"back",
"=",
"oc",
".",
"contract_at_offset",
"(",
"front",
",",
"1",
",",
"dt",
".",
"value",
")",
"if",
"back",
"is",
"None",
":",
"return",
"front",
"primary",
"=",
"self",
".",
"_active_contract",
"(",
"oc",
",",
"front",
",",
"back",
",",
"session",
")",
"return",
"oc",
".",
"contract_at_offset",
"(",
"primary",
",",
"offset",
",",
"session",
".",
"value",
")"
] | For the given root symbol, find the contract that is considered active
on a specific date at a specific offset. | [
"For",
"the",
"given",
"root",
"symbol",
"find",
"the",
"contract",
"that",
"is",
"considered",
"active",
"on",
"a",
"specific",
"date",
"at",
"a",
"specific",
"offset",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L33-L45 |
25,854 | quantopian/zipline | zipline/assets/roll_finder.py | RollFinder.get_rolls | def get_rolls(self, root_symbol, start, end, offset):
"""
Get the rolls, i.e. the session at which to hop from contract to
contract in the chain.
Parameters
----------
root_symbol : str
The root symbol for which to calculate rolls.
start : Timestamp
Start of the date range.
end : Timestamp
End of the date range.
offset : int
Offset from the primary.
Returns
-------
rolls - list[tuple(sid, roll_date)]
A list of rolls, where first value is the first active `sid`,
and the `roll_date` on which to hop to the next contract.
The last pair in the chain has a value of `None` since the roll
is after the range.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
front = self._get_active_contract_at_offset(root_symbol, end, 0)
back = oc.contract_at_offset(front, 1, end.value)
if back is not None:
end_session = self.trading_calendar.minute_to_session_label(end)
first = self._active_contract(oc, front, back, end_session)
else:
first = front
first_contract = oc.sid_to_contract[first]
rolls = [((first_contract >> offset).contract.sid, None)]
tc = self.trading_calendar
sessions = tc.sessions_in_range(tc.minute_to_session_label(start),
tc.minute_to_session_label(end))
freq = sessions.freq
if first == front:
# This is a bit tricky to grasp. Once we have the active contract
# on the given end date, we want to start walking backwards towards
# the start date and checking for rolls. For this, we treat the
# previous month's contract as the 'first' contract, and the
# contract we just found to be active as the 'back'. As we walk
# towards the start date, if the 'back' is no longer active, we add
# that date as a roll.
curr = first_contract << 1
else:
curr = first_contract << 2
session = sessions[-1]
while session > start and curr is not None:
front = curr.contract.sid
back = rolls[0][0]
prev_c = curr.prev
while session > start:
prev = session - freq
if prev_c is not None:
if prev < prev_c.contract.auto_close_date:
break
if back != self._active_contract(oc, front, back, prev):
# TODO: Instead of listing each contract with its roll date
# as tuples, create a series which maps every day to the
# active contract on that day.
rolls.insert(0, ((curr >> offset).contract.sid, session))
break
session = prev
curr = curr.prev
if curr is not None:
session = min(session, curr.contract.auto_close_date + freq)
return rolls | python | def get_rolls(self, root_symbol, start, end, offset):
"""
Get the rolls, i.e. the session at which to hop from contract to
contract in the chain.
Parameters
----------
root_symbol : str
The root symbol for which to calculate rolls.
start : Timestamp
Start of the date range.
end : Timestamp
End of the date range.
offset : int
Offset from the primary.
Returns
-------
rolls : list[tuple(sid, roll_date)]
A list of rolls, where the first value is the first active `sid`,
and the `roll_date` on which to hop to the next contract.
The last pair in the chain has a value of `None` since the roll
is after the range.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
front = self._get_active_contract_at_offset(root_symbol, end, 0)
back = oc.contract_at_offset(front, 1, end.value)
if back is not None:
end_session = self.trading_calendar.minute_to_session_label(end)
first = self._active_contract(oc, front, back, end_session)
else:
first = front
first_contract = oc.sid_to_contract[first]
rolls = [((first_contract >> offset).contract.sid, None)]
tc = self.trading_calendar
sessions = tc.sessions_in_range(tc.minute_to_session_label(start),
tc.minute_to_session_label(end))
freq = sessions.freq
if first == front:
# This is a bit tricky to grasp. Once we have the active contract
# on the given end date, we want to start walking backwards towards
# the start date and checking for rolls. For this, we treat the
# previous month's contract as the 'first' contract, and the
# contract we just found to be active as the 'back'. As we walk
# towards the start date, if the 'back' is no longer active, we add
# that date as a roll.
curr = first_contract << 1
else:
curr = first_contract << 2
session = sessions[-1]
while session > start and curr is not None:
front = curr.contract.sid
back = rolls[0][0]
prev_c = curr.prev
while session > start:
prev = session - freq
if prev_c is not None:
if prev < prev_c.contract.auto_close_date:
break
if back != self._active_contract(oc, front, back, prev):
# TODO: Instead of listing each contract with its roll date
# as tuples, create a series which maps every day to the
# active contract on that day.
rolls.insert(0, ((curr >> offset).contract.sid, session))
break
session = prev
curr = curr.prev
if curr is not None:
session = min(session, curr.contract.auto_close_date + freq)
return rolls | [
"def",
"get_rolls",
"(",
"self",
",",
"root_symbol",
",",
"start",
",",
"end",
",",
"offset",
")",
":",
"oc",
"=",
"self",
".",
"asset_finder",
".",
"get_ordered_contracts",
"(",
"root_symbol",
")",
"front",
"=",
"self",
".",
"_get_active_contract_at_offset",
"(",
"root_symbol",
",",
"end",
",",
"0",
")",
"back",
"=",
"oc",
".",
"contract_at_offset",
"(",
"front",
",",
"1",
",",
"end",
".",
"value",
")",
"if",
"back",
"is",
"not",
"None",
":",
"end_session",
"=",
"self",
".",
"trading_calendar",
".",
"minute_to_session_label",
"(",
"end",
")",
"first",
"=",
"self",
".",
"_active_contract",
"(",
"oc",
",",
"front",
",",
"back",
",",
"end_session",
")",
"else",
":",
"first",
"=",
"front",
"first_contract",
"=",
"oc",
".",
"sid_to_contract",
"[",
"first",
"]",
"rolls",
"=",
"[",
"(",
"(",
"first_contract",
">>",
"offset",
")",
".",
"contract",
".",
"sid",
",",
"None",
")",
"]",
"tc",
"=",
"self",
".",
"trading_calendar",
"sessions",
"=",
"tc",
".",
"sessions_in_range",
"(",
"tc",
".",
"minute_to_session_label",
"(",
"start",
")",
",",
"tc",
".",
"minute_to_session_label",
"(",
"end",
")",
")",
"freq",
"=",
"sessions",
".",
"freq",
"if",
"first",
"==",
"front",
":",
"# This is a bit tricky to grasp. Once we have the active contract",
"# on the given end date, we want to start walking backwards towards",
"# the start date and checking for rolls. For this, we treat the",
"# previous month's contract as the 'first' contract, and the",
"# contract we just found to be active as the 'back'. As we walk",
"# towards the start date, if the 'back' is no longer active, we add",
"# that date as a roll.",
"curr",
"=",
"first_contract",
"<<",
"1",
"else",
":",
"curr",
"=",
"first_contract",
"<<",
"2",
"session",
"=",
"sessions",
"[",
"-",
"1",
"]",
"while",
"session",
">",
"start",
"and",
"curr",
"is",
"not",
"None",
":",
"front",
"=",
"curr",
".",
"contract",
".",
"sid",
"back",
"=",
"rolls",
"[",
"0",
"]",
"[",
"0",
"]",
"prev_c",
"=",
"curr",
".",
"prev",
"while",
"session",
">",
"start",
":",
"prev",
"=",
"session",
"-",
"freq",
"if",
"prev_c",
"is",
"not",
"None",
":",
"if",
"prev",
"<",
"prev_c",
".",
"contract",
".",
"auto_close_date",
":",
"break",
"if",
"back",
"!=",
"self",
".",
"_active_contract",
"(",
"oc",
",",
"front",
",",
"back",
",",
"prev",
")",
":",
"# TODO: Instead of listing each contract with its roll date",
"# as tuples, create a series which maps every day to the",
"# active contract on that day.",
"rolls",
".",
"insert",
"(",
"0",
",",
"(",
"(",
"curr",
">>",
"offset",
")",
".",
"contract",
".",
"sid",
",",
"session",
")",
")",
"break",
"session",
"=",
"prev",
"curr",
"=",
"curr",
".",
"prev",
"if",
"curr",
"is",
"not",
"None",
":",
"session",
"=",
"min",
"(",
"session",
",",
"curr",
".",
"contract",
".",
"auto_close_date",
"+",
"freq",
")",
"return",
"rolls"
] | Get the rolls, i.e. the session at which to hop from contract to
contract in the chain.
Parameters
----------
root_symbol : str
The root symbol for which to calculate rolls.
start : Timestamp
Start of the date range.
end : Timestamp
End of the date range.
offset : int
Offset from the primary.
Returns
-------
rolls - list[tuple(sid, roll_date)]
A list of rolls, where the first value is the first active `sid`,
and the `roll_date` on which to hop to the next contract.
The last pair in the chain has a value of `None` since the roll
is after the range. | [
"Get",
"the",
"rolls",
"i",
".",
"e",
".",
"the",
"session",
"at",
"which",
"to",
"hop",
"from",
"contract",
"to",
"contract",
"in",
"the",
"chain",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L66-L137 |
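The `rolls` list returned above has the shape `[(sid, roll_date), ..., (sid, None)]`. A minimal, self-contained sketch of consuming such a list — not zipline's API, with hypothetical sids and dates, and assuming the next contract takes over on the roll date itself:

```python
import pandas as pd

def active_sid_on(rolls, session):
    """Return the sid active on `session`, given rolls sorted by date."""
    for sid, roll_date in rolls:
        # A roll_date of None marks the last contract in the range.
        if roll_date is None or session < roll_date:
            return sid
    raise ValueError("session is after the last roll")

rolls = [
    (1001, pd.Timestamp("2016-02-15")),  # hypothetical sid/date pairs
    (1002, pd.Timestamp("2016-03-15")),
    (1003, None),
]
assert active_sid_on(rolls, pd.Timestamp("2016-02-01")) == 1001
assert active_sid_on(rolls, pd.Timestamp("2016-03-01")) == 1002
assert active_sid_on(rolls, pd.Timestamp("2016-04-01")) == 1003
```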
25,855 | quantopian/zipline | zipline/assets/roll_finder.py | VolumeRollFinder._active_contract | def _active_contract(self, oc, front, back, dt):
r"""
Return the active contract based on the previous trading day's volume.
In the rare case that a double volume switch occurs we treat the first
switch as the roll. Take the following case for example:
| +++++ _____
| + __ / <--- 'G'
| ++/++\++++/++
| _/ \__/ +
| / +
| ____/ + <--- 'F'
|_________|__|___|________
a b c <--- Switches
We should treat 'a' as the roll date rather than 'c' because from the
perspective of 'a', if a switch happens and we are pretty close to the
auto-close date, we would probably assume it is time to roll. This
means that for every date after 'a', `data.current(cf, 'contract')`
should return the 'G' contract.
"""
front_contract = oc.sid_to_contract[front].contract
back_contract = oc.sid_to_contract[back].contract
tc = self.trading_calendar
trading_day = tc.day
prev = dt - trading_day
get_value = self.session_reader.get_value
# If the front contract is past its auto close date it cannot be the
# active contract, so return the back contract. Similarly, if the back
# contract has not even started yet, just return the front contract.
# The reason for using 'prev' to see if the contracts are alive instead
# of using 'dt' is because we need to get each contract's volume on the
# previous day, so we need to make sure that each contract exists on
# 'prev' in order to call 'get_value' below.
if dt > min(front_contract.auto_close_date, front_contract.end_date):
return back
elif front_contract.start_date > prev:
return back
elif dt > min(back_contract.auto_close_date, back_contract.end_date):
return front
elif back_contract.start_date > prev:
return front
front_vol = get_value(front, prev, 'volume')
back_vol = get_value(back, prev, 'volume')
if back_vol > front_vol:
return back
gap_start = max(
back_contract.start_date,
front_contract.auto_close_date - (trading_day * self.GRACE_DAYS),
)
gap_end = prev - trading_day
if dt < gap_start:
return front
# If we are within `self.GRACE_DAYS` of the front contract's auto close
# date, and a volume flip happened during that period, return the back
# contract as the active one.
sessions = tc.sessions_in_range(
tc.minute_to_session_label(gap_start),
tc.minute_to_session_label(gap_end),
)
for session in sessions:
front_vol = get_value(front, session, 'volume')
back_vol = get_value(back, session, 'volume')
if back_vol > front_vol:
return back
return front | python | def _active_contract(self, oc, front, back, dt):
r"""
Return the active contract based on the previous trading day's volume.
In the rare case that a double volume switch occurs we treat the first
switch as the roll. Take the following case for example:
| +++++ _____
| + __ / <--- 'G'
| ++/++\++++/++
| _/ \__/ +
| / +
| ____/ + <--- 'F'
|_________|__|___|________
a b c <--- Switches
We should treat 'a' as the roll date rather than 'c' because from the
perspective of 'a', if a switch happens and we are pretty close to the
auto-close date, we would probably assume it is time to roll. This
means that for every date after 'a', `data.current(cf, 'contract')`
should return the 'G' contract.
"""
front_contract = oc.sid_to_contract[front].contract
back_contract = oc.sid_to_contract[back].contract
tc = self.trading_calendar
trading_day = tc.day
prev = dt - trading_day
get_value = self.session_reader.get_value
# If the front contract is past its auto close date it cannot be the
# active contract, so return the back contract. Similarly, if the back
# contract has not even started yet, just return the front contract.
# The reason for using 'prev' to see if the contracts are alive instead
# of using 'dt' is because we need to get each contract's volume on the
# previous day, so we need to make sure that each contract exists on
# 'prev' in order to call 'get_value' below.
if dt > min(front_contract.auto_close_date, front_contract.end_date):
return back
elif front_contract.start_date > prev:
return back
elif dt > min(back_contract.auto_close_date, back_contract.end_date):
return front
elif back_contract.start_date > prev:
return front
front_vol = get_value(front, prev, 'volume')
back_vol = get_value(back, prev, 'volume')
if back_vol > front_vol:
return back
gap_start = max(
back_contract.start_date,
front_contract.auto_close_date - (trading_day * self.GRACE_DAYS),
)
gap_end = prev - trading_day
if dt < gap_start:
return front
# If we are within `self.GRACE_DAYS` of the front contract's auto close
# date, and a volume flip happened during that period, return the back
# contract as the active one.
sessions = tc.sessions_in_range(
tc.minute_to_session_label(gap_start),
tc.minute_to_session_label(gap_end),
)
for session in sessions:
front_vol = get_value(front, session, 'volume')
back_vol = get_value(back, session, 'volume')
if back_vol > front_vol:
return back
return front | [
"def",
"_active_contract",
"(",
"self",
",",
"oc",
",",
"front",
",",
"back",
",",
"dt",
")",
":",
"front_contract",
"=",
"oc",
".",
"sid_to_contract",
"[",
"front",
"]",
".",
"contract",
"back_contract",
"=",
"oc",
".",
"sid_to_contract",
"[",
"back",
"]",
".",
"contract",
"tc",
"=",
"self",
".",
"trading_calendar",
"trading_day",
"=",
"tc",
".",
"day",
"prev",
"=",
"dt",
"-",
"trading_day",
"get_value",
"=",
"self",
".",
"session_reader",
".",
"get_value",
"# If the front contract is past its auto close date it cannot be the",
"# active contract, so return the back contract. Similarly, if the back",
"# contract has not even started yet, just return the front contract.",
"# The reason for using 'prev' to see if the contracts are alive instead",
"# of using 'dt' is because we need to get each contract's volume on the",
"# previous day, so we need to make sure that each contract exists on",
"# 'prev' in order to call 'get_value' below.",
"if",
"dt",
">",
"min",
"(",
"front_contract",
".",
"auto_close_date",
",",
"front_contract",
".",
"end_date",
")",
":",
"return",
"back",
"elif",
"front_contract",
".",
"start_date",
">",
"prev",
":",
"return",
"back",
"elif",
"dt",
">",
"min",
"(",
"back_contract",
".",
"auto_close_date",
",",
"back_contract",
".",
"end_date",
")",
":",
"return",
"front",
"elif",
"back_contract",
".",
"start_date",
">",
"prev",
":",
"return",
"front",
"front_vol",
"=",
"get_value",
"(",
"front",
",",
"prev",
",",
"'volume'",
")",
"back_vol",
"=",
"get_value",
"(",
"back",
",",
"prev",
",",
"'volume'",
")",
"if",
"back_vol",
">",
"front_vol",
":",
"return",
"back",
"gap_start",
"=",
"max",
"(",
"back_contract",
".",
"start_date",
",",
"front_contract",
".",
"auto_close_date",
"-",
"(",
"trading_day",
"*",
"self",
".",
"GRACE_DAYS",
")",
",",
")",
"gap_end",
"=",
"prev",
"-",
"trading_day",
"if",
"dt",
"<",
"gap_start",
":",
"return",
"front",
"# If we are within `self.GRACE_DAYS` of the front contract's auto close",
"# date, and a volume flip happened during that period, return the back",
"# contract as the active one.",
"sessions",
"=",
"tc",
".",
"sessions_in_range",
"(",
"tc",
".",
"minute_to_session_label",
"(",
"gap_start",
")",
",",
"tc",
".",
"minute_to_session_label",
"(",
"gap_end",
")",
",",
")",
"for",
"session",
"in",
"sessions",
":",
"front_vol",
"=",
"get_value",
"(",
"front",
",",
"session",
",",
"'volume'",
")",
"back_vol",
"=",
"get_value",
"(",
"back",
",",
"session",
",",
"'volume'",
")",
"if",
"back_vol",
">",
"front_vol",
":",
"return",
"back",
"return",
"front"
] | r"""
Return the active contract based on the previous trading day's volume.
In the rare case that a double volume switch occurs we treat the first
switch as the roll. Take the following case for example:
| +++++ _____
| + __ / <--- 'G'
| ++/++\++++/++
| _/ \__/ +
| / +
| ____/ + <--- 'F'
|_________|__|___|________
a b c <--- Switches
We should treat 'a' as the roll date rather than 'c' because from the
perspective of 'a', if a switch happens and we are pretty close to the
auto-close date, we would probably assume it is time to roll. This
means that for every date after 'a', `data.current(cf, 'contract')`
should return the 'G' contract. | [
"r",
"Return",
"the",
"active",
"contract",
"based",
"on",
"the",
"previous",
"trading",
"day",
"s",
"volume",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L170-L241 |
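The "first switch counts" rule described in the docstring above can be isolated with plain pandas Series standing in for the volume reader; this sketch ignores the `GRACE_DAYS` gating and the start/auto-close bounds that the real method applies:

```python
import pandas as pd

def first_volume_switch(front_vol, back_vol):
    """First session where back volume exceeds front volume, else None."""
    flips = back_vol > front_vol
    return flips.idxmax() if flips.any() else None

sessions = pd.date_range("2016-01-04", periods=5, freq="B")
front = pd.Series([100, 90, 40, 80, 10], index=sessions)
back = pd.Series([10, 20, 60, 50, 90], index=sessions)
# The flip at the third session is the roll ('a' in the diagram above),
# even though volume flips back afterwards; the later flips are ignored.
print(first_volume_switch(front, back))  # 2016-01-06
```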
25,856 | quantopian/zipline | zipline/lib/adjusted_array.py | _normalize_array | def _normalize_array(data, missing_value):
"""
Coerce buffer data for an AdjustedArray into a standard scalar
representation, returning the coerced array and a dict of arguments to pass
to np.ndarray.view when providing a user-facing view of the underlying data.
- float* data is coerced to float64 with viewtype float64.
- int32, int64, and uint32 are converted to int64 with viewtype int64.
- datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
- bool_ data is coerced to uint8 with a viewtype of bool_.
Parameters
----------
data : np.ndarray
Returns
-------
coerced, view_kwargs : (np.ndarray, np.dtype)
"""
if isinstance(data, LabelArray):
return data, {}
data_dtype = data.dtype
if data_dtype in BOOL_DTYPES:
return data.astype(uint8), {'dtype': dtype(bool_)}
elif data_dtype in FLOAT_DTYPES:
return data.astype(float64), {'dtype': dtype(float64)}
elif data_dtype in INT_DTYPES:
return data.astype(int64), {'dtype': dtype(int64)}
elif is_categorical(data_dtype):
if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
raise TypeError(
"Invalid missing_value for categorical array.\n"
"Expected None, bytes or unicode. Got %r." % missing_value,
)
return LabelArray(data, missing_value), {}
elif data_dtype.kind == 'M':
try:
outarray = data.astype('datetime64[ns]').view('int64')
return outarray, {'dtype': datetime64ns_dtype}
except OverflowError:
raise ValueError(
"AdjustedArray received a datetime array "
"not representable as datetime64[ns].\n"
"Min Date: %s\n"
"Max Date: %s\n"
% (data.min(), data.max())
)
else:
raise TypeError(
"Don't know how to construct AdjustedArray "
"on data of type %s." % data_dtype
) | python | def _normalize_array(data, missing_value):
"""
Coerce buffer data for an AdjustedArray into a standard scalar
representation, returning the coerced array and a dict of arguments to pass
to np.ndarray.view when providing a user-facing view of the underlying data.
- float* data is coerced to float64 with viewtype float64.
- int32, int64, and uint32 are converted to int64 with viewtype int64.
- datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
- bool_ data is coerced to uint8 with a viewtype of bool_.
Parameters
----------
data : np.ndarray
Returns
-------
coerced, view_kwargs : (np.ndarray, np.dtype)
"""
if isinstance(data, LabelArray):
return data, {}
data_dtype = data.dtype
if data_dtype in BOOL_DTYPES:
return data.astype(uint8), {'dtype': dtype(bool_)}
elif data_dtype in FLOAT_DTYPES:
return data.astype(float64), {'dtype': dtype(float64)}
elif data_dtype in INT_DTYPES:
return data.astype(int64), {'dtype': dtype(int64)}
elif is_categorical(data_dtype):
if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
raise TypeError(
"Invalid missing_value for categorical array.\n"
"Expected None, bytes or unicode. Got %r." % missing_value,
)
return LabelArray(data, missing_value), {}
elif data_dtype.kind == 'M':
try:
outarray = data.astype('datetime64[ns]').view('int64')
return outarray, {'dtype': datetime64ns_dtype}
except OverflowError:
raise ValueError(
"AdjustedArray received a datetime array "
"not representable as datetime64[ns].\n"
"Min Date: %s\n"
"Max Date: %s\n"
% (data.min(), data.max())
)
else:
raise TypeError(
"Don't know how to construct AdjustedArray "
"on data of type %s." % data_dtype
) | [
"def",
"_normalize_array",
"(",
"data",
",",
"missing_value",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"LabelArray",
")",
":",
"return",
"data",
",",
"{",
"}",
"data_dtype",
"=",
"data",
".",
"dtype",
"if",
"data_dtype",
"in",
"BOOL_DTYPES",
":",
"return",
"data",
".",
"astype",
"(",
"uint8",
")",
",",
"{",
"'dtype'",
":",
"dtype",
"(",
"bool_",
")",
"}",
"elif",
"data_dtype",
"in",
"FLOAT_DTYPES",
":",
"return",
"data",
".",
"astype",
"(",
"float64",
")",
",",
"{",
"'dtype'",
":",
"dtype",
"(",
"float64",
")",
"}",
"elif",
"data_dtype",
"in",
"INT_DTYPES",
":",
"return",
"data",
".",
"astype",
"(",
"int64",
")",
",",
"{",
"'dtype'",
":",
"dtype",
"(",
"int64",
")",
"}",
"elif",
"is_categorical",
"(",
"data_dtype",
")",
":",
"if",
"not",
"isinstance",
"(",
"missing_value",
",",
"LabelArray",
".",
"SUPPORTED_SCALAR_TYPES",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid missing_value for categorical array.\\n\"",
"\"Expected None, bytes or unicode. Got %r.\"",
"%",
"missing_value",
",",
")",
"return",
"LabelArray",
"(",
"data",
",",
"missing_value",
")",
",",
"{",
"}",
"elif",
"data_dtype",
".",
"kind",
"==",
"'M'",
":",
"try",
":",
"outarray",
"=",
"data",
".",
"astype",
"(",
"'datetime64[ns]'",
")",
".",
"view",
"(",
"'int64'",
")",
"return",
"outarray",
",",
"{",
"'dtype'",
":",
"datetime64ns_dtype",
"}",
"except",
"OverflowError",
":",
"raise",
"ValueError",
"(",
"\"AdjustedArray received a datetime array \"",
"\"not representable as datetime64[ns].\\n\"",
"\"Min Date: %s\\n\"",
"\"Max Date: %s\\n\"",
"%",
"(",
"data",
".",
"min",
"(",
")",
",",
"data",
".",
"max",
"(",
")",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Don't know how to construct AdjustedArray \"",
"\"on data of type %s.\"",
"%",
"data_dtype",
")"
] | Coerce buffer data for an AdjustedArray into a standard scalar
representation, returning the coerced array and a dict of arguments to pass
to np.ndarray.view when providing a user-facing view of the underlying data.
- float* data is coerced to float64 with viewtype float64.
- int32, int64, and uint32 are converted to int64 with viewtype int64.
- datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
- bool_ data is coerced to uint8 with a viewtype of bool_.
Parameters
----------
data : np.ndarray
Returns
-------
coerced, view_kwargs : (np.ndarray, np.dtype) | [
"Coerce",
"buffer",
"data",
"for",
"an",
"AdjustedArray",
"into",
"a",
"standard",
"scalar",
"representation",
"returning",
"the",
"coerced",
"array",
"and",
"a",
"dict",
"of",
"argument",
"to",
"pass",
"to",
"np",
".",
"view",
"to",
"use",
"when",
"providing",
"a",
"user",
"-",
"facing",
"view",
"of",
"the",
"underlying",
"data",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L84-L136 |
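The coercions listed in the docstring above are plain numpy operations; a short demonstration of each, independent of zipline:

```python
import numpy as np

ints = np.arange(4, dtype=np.int32)
print(ints.astype(np.int64).dtype)                 # int64

stamps = np.array(["2016-01-04", "2016-01-05"], dtype="datetime64[D]")
as_ns = stamps.astype("datetime64[ns]").view("int64")
print(as_ns.dtype)                                 # int64 (ns since epoch)
print(as_ns.view("datetime64[ns]"))                # round-trips to the dates

flags = np.array([True, False])
print(flags.astype(np.uint8))                      # [1 0]
```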
25,857 | quantopian/zipline | zipline/lib/adjusted_array.py | _merge_simple | def _merge_simple(adjustment_lists, front_idx, back_idx):
"""
Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index.
"""
if len(adjustment_lists) == 1:
return list(adjustment_lists[0])
else:
return adjustment_lists[front_idx] + adjustment_lists[back_idx] | python | def _merge_simple(adjustment_lists, front_idx, back_idx):
"""
Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index.
"""
if len(adjustment_lists) == 1:
return list(adjustment_lists[0])
else:
return adjustment_lists[front_idx] + adjustment_lists[back_idx] | [
"def",
"_merge_simple",
"(",
"adjustment_lists",
",",
"front_idx",
",",
"back_idx",
")",
":",
"if",
"len",
"(",
"adjustment_lists",
")",
"==",
"1",
":",
"return",
"list",
"(",
"adjustment_lists",
"[",
"0",
"]",
")",
"else",
":",
"return",
"adjustment_lists",
"[",
"front_idx",
"]",
"+",
"adjustment_lists",
"[",
"back_idx",
"]"
] | Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index. | [
"Merge",
"lists",
"of",
"new",
"and",
"existing",
"adjustments",
"for",
"a",
"given",
"index",
"by",
"appending",
"or",
"prepending",
"new",
"adjustments",
"to",
"existing",
"adjustments",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L139-L170 |
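The Notes above describe the two input shapes `toolz.merge_with` can hand this function; a standalone restatement (placeholder strings instead of real Adjustment objects) makes them concrete:

```python
def merge_simple(adjustment_lists, front_idx, back_idx):
    # Same logic as _merge_simple above, restated for a runnable demo.
    if len(adjustment_lists) == 1:
        return list(adjustment_lists[0])
    return adjustment_lists[front_idx] + adjustment_lists[back_idx]

print(merge_simple([['only']], 0, 1))          # no collision -> ['only']
print(merge_simple([['old'], ['new']], 0, 1))  # append  -> ['old', 'new']
print(merge_simple([['old'], ['new']], 1, 0))  # prepend -> ['new', 'old']
```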
25,858 | quantopian/zipline | zipline/lib/adjusted_array.py | ensure_ndarray | def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray.
"""
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" %
type(ndarray_or_adjusted_array).__name__
) | python | def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray.
"""
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" %
type(ndarray_or_adjusted_array).__name__
) | [
"def",
"ensure_ndarray",
"(",
"ndarray_or_adjusted_array",
")",
":",
"if",
"isinstance",
"(",
"ndarray_or_adjusted_array",
",",
"ndarray",
")",
":",
"return",
"ndarray_or_adjusted_array",
"elif",
"isinstance",
"(",
"ndarray_or_adjusted_array",
",",
"AdjustedArray",
")",
":",
"return",
"ndarray_or_adjusted_array",
".",
"data",
"else",
":",
"raise",
"TypeError",
"(",
"\"Can't convert %s to ndarray\"",
"%",
"type",
"(",
"ndarray_or_adjusted_array",
")",
".",
"__name__",
")"
] | Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray. | [
"Return",
"the",
"input",
"as",
"a",
"numpy",
"ndarray",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L345-L368 |
25,859 | quantopian/zipline | zipline/lib/adjusted_array.py | _check_window_params | def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
) | python | def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
) | [
"def",
"_check_window_params",
"(",
"data",
",",
"window_length",
")",
":",
"if",
"window_length",
"<",
"1",
":",
"raise",
"WindowLengthNotPositive",
"(",
"window_length",
"=",
"window_length",
")",
"if",
"window_length",
">",
"data",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"WindowLengthTooLong",
"(",
"nrows",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"window_length",
"=",
"window_length",
",",
")"
] | Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`. | [
"Check",
"that",
"a",
"window",
"of",
"length",
"window_length",
"is",
"well",
"-",
"defined",
"on",
"data",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L371-L400 |
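The same bounds check, with the zipline exception types swapped for ValueError so the sketch runs on its own — note that a window equal to the row count is allowed:

```python
import numpy as np

def check_window_params(data, window_length):
    if window_length < 1:
        raise ValueError("window_length must be positive: %d" % window_length)
    if window_length > data.shape[0]:
        raise ValueError("window_length %d exceeds %d rows"
                         % (window_length, data.shape[0]))

data = np.zeros((5, 2))
check_window_params(data, 5)        # fine: window may span all rows
try:
    check_window_params(data, 6)
except ValueError as e:
    print(e)                        # window_length 6 exceeds 5 rows
```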
25,860 | quantopian/zipline | zipline/lib/adjusted_array.py | AdjustedArray.update_adjustments | def update_adjustments(self, adjustments, method):
"""
Merge ``adjustments`` with existing adjustments, handling index
collisions according to ``method``.
Parameters
----------
adjustments : dict[int -> list[Adjustment]]
The mapping of row indices to lists of adjustments that should be
appended to existing adjustments.
method : {'append', 'prepend'}
How to handle index collisions. If 'append', new adjustments will
be applied after previously-existing adjustments. If 'prepend', new
adjustments will be applied before previously-existing adjustments.
"""
try:
merge_func = _merge_methods[method]
except KeyError:
raise ValueError(
"Invalid merge method %s\n"
"Valid methods are: %s" % (method, ', '.join(_merge_methods))
)
self.adjustments = merge_with(
merge_func,
self.adjustments,
adjustments,
) | python | def update_adjustments(self, adjustments, method):
"""
Merge ``adjustments`` with existing adjustments, handling index
collisions according to ``method``.
Parameters
----------
adjustments : dict[int -> list[Adjustment]]
The mapping of row indices to lists of adjustments that should be
appended to existing adjustments.
method : {'append', 'prepend'}
How to handle index collisions. If 'append', new adjustments will
be applied after previously-existing adjustments. If 'prepend', new
adjustments will be applied before previously-existing adjustments.
"""
try:
merge_func = _merge_methods[method]
except KeyError:
raise ValueError(
"Invalid merge method %s\n"
"Valid methods are: %s" % (method, ', '.join(_merge_methods))
)
self.adjustments = merge_with(
merge_func,
self.adjustments,
adjustments,
) | [
"def",
"update_adjustments",
"(",
"self",
",",
"adjustments",
",",
"method",
")",
":",
"try",
":",
"merge_func",
"=",
"_merge_methods",
"[",
"method",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Invalid merge method %s\\n\"",
"\"Valid methods are: %s\"",
"%",
"(",
"method",
",",
"', '",
".",
"join",
"(",
"_merge_methods",
")",
")",
")",
"self",
".",
"adjustments",
"=",
"merge_with",
"(",
"merge_func",
",",
"self",
".",
"adjustments",
",",
"adjustments",
",",
")"
] | Merge ``adjustments`` with existing adjustments, handling index
collisions according to ``method``.
Parameters
----------
adjustments : dict[int -> list[Adjustment]]
The mapping of row indices to lists of adjustments that should be
appended to existing adjustments.
method : {'append', 'prepend'}
How to handle index collisions. If 'append', new adjustments will
be applied after previously-existing adjustments. If 'prepend', new
adjustments will be applied before previously-existing adjustments. | [
"Merge",
"adjustments",
"with",
"existing",
"adjustments",
"handling",
"index",
"collisions",
"according",
"to",
"method",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L209-L236 |
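How the 'append'/'prepend' policies play out through `toolz.merge_with` on plain dicts (placeholder strings again stand in for Adjustment objects):

```python
from toolz import merge_with

def append(lists):
    return list(lists[0]) if len(lists) == 1 else lists[0] + lists[1]

def prepend(lists):
    return list(lists[0]) if len(lists) == 1 else lists[1] + lists[0]

existing = {1: ['keep'], 2: ['old']}
new = {2: ['new']}
print(merge_with(append, existing, new))   # {1: ['keep'], 2: ['old', 'new']}
print(merge_with(prepend, existing, new))  # {1: ['keep'], 2: ['new', 'old']}
```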
25,861 | quantopian/zipline | zipline/lib/adjusted_array.py | AdjustedArray._iterator_type | def _iterator_type(self):
"""
The iterator produced when `traverse` is called on this Array.
"""
if isinstance(self._data, LabelArray):
return LabelWindow
return CONCRETE_WINDOW_TYPES[self._data.dtype] | python | def _iterator_type(self):
"""
The iterator produced when `traverse` is called on this Array.
"""
if isinstance(self._data, LabelArray):
return LabelWindow
return CONCRETE_WINDOW_TYPES[self._data.dtype] | [
"def",
"_iterator_type",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_data",
",",
"LabelArray",
")",
":",
"return",
"LabelWindow",
"return",
"CONCRETE_WINDOW_TYPES",
"[",
"self",
".",
"_data",
".",
"dtype",
"]"
] | The iterator produced when `traverse` is called on this Array. | [
"The",
"iterator",
"produced",
"when",
"traverse",
"is",
"called",
"on",
"this",
"Array",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L253-L259 |
25,862 | quantopian/zipline | zipline/lib/adjusted_array.py | AdjustedArray.traverse | def traverse(self,
window_length,
offset=0,
perspective_offset=0):
"""
Produce an iterator of rolling windows over our data.
Each emitted window will have `window_length` rows.
Parameters
----------
window_length : int
The number of rows in each emitted window.
offset : int, optional
Number of rows to skip before the first window. Default is 0.
perspective_offset : int, optional
Number of rows past the end of the current window from which to
"view" the underlying data.
"""
data = self._data.copy()
_check_window_params(data, window_length)
return self._iterator_type(
data,
self._view_kwargs,
self.adjustments,
offset,
window_length,
perspective_offset,
rounding_places=None,
) | python | def traverse(self,
window_length,
offset=0,
perspective_offset=0):
"""
Produce an iterator of rolling windows over our data.
Each emitted window will have `window_length` rows.
Parameters
----------
window_length : int
The number of rows in each emitted window.
offset : int, optional
Number of rows to skip before the first window. Default is 0.
perspective_offset : int, optional
Number of rows past the end of the current window from which to
"view" the underlying data.
"""
data = self._data.copy()
_check_window_params(data, window_length)
return self._iterator_type(
data,
self._view_kwargs,
self.adjustments,
offset,
window_length,
perspective_offset,
rounding_places=None,
) | [
"def",
"traverse",
"(",
"self",
",",
"window_length",
",",
"offset",
"=",
"0",
",",
"perspective_offset",
"=",
"0",
")",
":",
"data",
"=",
"self",
".",
"_data",
".",
"copy",
"(",
")",
"_check_window_params",
"(",
"data",
",",
"window_length",
")",
"return",
"self",
".",
"_iterator_type",
"(",
"data",
",",
"self",
".",
"_view_kwargs",
",",
"self",
".",
"adjustments",
",",
"offset",
",",
"window_length",
",",
"perspective_offset",
",",
"rounding_places",
"=",
"None",
",",
")"
] | Produce an iterator of rolling windows over our data.
Each emitted window will have `window_length` rows.
Parameters
----------
window_length : int
The number of rows in each emitted window.
offset : int, optional
Number of rows to skip before the first window. Default is 0.
perspective_offset : int, optional
Number of rows past the end of the current window from which to
"view" the underlying data. | [
"Produce",
"an",
"iterator",
"rolling",
"windows",
"rows",
"over",
"our",
"data",
".",
"Each",
"emitted",
"window",
"will",
"have",
"window_length",
"rows",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L261-L289 |
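The window semantics above, restated with plain numpy slicing (the `perspective_offset` and adjustment machinery are omitted): each step yields a `window_length`-row view, starting `offset` rows in.

```python
import numpy as np

def naive_traverse(data, window_length, offset=0):
    for end in range(offset + window_length, data.shape[0] + 1):
        yield data[end - window_length:end]

data = np.arange(10).reshape(5, 2)
for window in naive_traverse(data, window_length=2):
    print(window[-1])   # the newest row of each two-row window
```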
25,863 | quantopian/zipline | zipline/lib/adjusted_array.py | AdjustedArray.inspect | def inspect(self):
"""
Return a string representation of the data stored in this array.
"""
return dedent(
"""\
Adjusted Array ({dtype}):
Data:
{data!r}
Adjustments:
{adjustments}
"""
).format(
dtype=self.dtype.name,
data=self.data,
adjustments=self.adjustments,
) | python | def inspect(self):
"""
Return a string representation of the data stored in this array.
"""
return dedent(
"""\
Adjusted Array ({dtype}):
Data:
{data!r}
Adjustments:
{adjustments}
"""
).format(
dtype=self.dtype.name,
data=self.data,
adjustments=self.adjustments,
) | [
"def",
"inspect",
"(",
"self",
")",
":",
"return",
"dedent",
"(",
"\"\"\"\\\n Adjusted Array ({dtype}):\n\n Data:\n {data!r}\n\n Adjustments:\n {adjustments}\n \"\"\"",
")",
".",
"format",
"(",
"dtype",
"=",
"self",
".",
"dtype",
".",
"name",
",",
"data",
"=",
"self",
".",
"data",
",",
"adjustments",
"=",
"self",
".",
"adjustments",
",",
")"
] | Return a string representation of the data stored in this array. | [
"Return",
"a",
"string",
"representation",
"of",
"the",
"data",
"stored",
"in",
"this",
"array",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L291-L309 |
25,864 | quantopian/zipline | zipline/lib/adjusted_array.py | AdjustedArray.update_labels | def update_labels(self, func):
"""
Map a function over baseline and adjustment values in place.
Note that the baseline data values must be a LabelArray.
"""
if not isinstance(self.data, LabelArray):
raise TypeError(
'update_labels only supported if data is of type LabelArray.'
)
# Map the baseline values.
self._data = self._data.map(func)
# Map each of the adjustments.
for _, row_adjustments in iteritems(self.adjustments):
for adjustment in row_adjustments:
adjustment.value = func(adjustment.value) | python | def update_labels(self, func):
"""
Map a function over baseline and adjustment values in place.
Note that the baseline data values must be a LabelArray.
"""
if not isinstance(self.data, LabelArray):
raise TypeError(
'update_labels only supported if data is of type LabelArray.'
)
# Map the baseline values.
self._data = self._data.map(func)
# Map each of the adjustments.
for _, row_adjustments in iteritems(self.adjustments):
for adjustment in row_adjustments:
adjustment.value = func(adjustment.value) | [
"def",
"update_labels",
"(",
"self",
",",
"func",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"data",
",",
"LabelArray",
")",
":",
"raise",
"TypeError",
"(",
"'update_labels only supported if data is of type LabelArray.'",
")",
"# Map the baseline values.",
"self",
".",
"_data",
"=",
"self",
".",
"_data",
".",
"map",
"(",
"func",
")",
"# Map each of the adjustments.",
"for",
"_",
",",
"row_adjustments",
"in",
"iteritems",
"(",
"self",
".",
"adjustments",
")",
":",
"for",
"adjustment",
"in",
"row_adjustments",
":",
"adjustment",
".",
"value",
"=",
"func",
"(",
"adjustment",
".",
"value",
")"
] | Map a function over baseline and adjustment values in place.
Note that the baseline data values must be a LabelArray. | [
"Map",
"a",
"function",
"over",
"baseline",
"and",
"adjustment",
"values",
"in",
"place",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L311-L328 |
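A pure-Python sketch of the same in-place mapping, with a list and nested dicts standing in for the LabelArray and Adjustment objects:

```python
values = ['aapl', 'msft', 'tsla']
adjustments = {0: [{'value': 'ibm'}], 2: [{'value': 'goog'}]}

func = str.upper
values = [func(v) for v in values]             # map the baseline values
for row_adjustments in adjustments.values():   # ...and every adjustment
    for adj in row_adjustments:
        adj['value'] = func(adj['value'])
print(values)        # ['AAPL', 'MSFT', 'TSLA']
print(adjustments)   # {0: [{'value': 'IBM'}], 2: [{'value': 'GOOG'}]}
```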
25,865 | quantopian/zipline | zipline/finance/controls.py | TradingControl.handle_violation | def handle_violation(self, asset, amount, datetime, metadata=None):
"""
Handle a TradingControlViolation, either by raising or logging an
error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = self._constraint_msg(metadata)
if self.on_error == 'fail':
raise TradingControlViolation(
asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
elif self.on_error == 'log':
log.error("Order for {amount} shares of {asset} at {dt} "
"violates trading constraint {constraint}",
amount=amount, asset=asset, dt=datetime,
constraint=constraint) | python | def handle_violation(self, asset, amount, datetime, metadata=None):
"""
Handle a TradingControlViolation, either by raising or logging an
error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = self._constraint_msg(metadata)
if self.on_error == 'fail':
raise TradingControlViolation(
asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
elif self.on_error == 'log':
log.error("Order for {amount} shares of {asset} at {dt} "
"violates trading constraint {constraint}",
amount=amount, asset=asset, dt=datetime,
constraint=constraint) | [
"def",
"handle_violation",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"datetime",
",",
"metadata",
"=",
"None",
")",
":",
"constraint",
"=",
"self",
".",
"_constraint_msg",
"(",
"metadata",
")",
"if",
"self",
".",
"on_error",
"==",
"'fail'",
":",
"raise",
"TradingControlViolation",
"(",
"asset",
"=",
"asset",
",",
"amount",
"=",
"amount",
",",
"datetime",
"=",
"datetime",
",",
"constraint",
"=",
"constraint",
")",
"elif",
"self",
".",
"on_error",
"==",
"'log'",
":",
"log",
".",
"error",
"(",
"\"Order for {amount} shares of {asset} at {dt} \"",
"\"violates trading constraint {constraint}\"",
",",
"amount",
"=",
"amount",
",",
"asset",
"=",
"asset",
",",
"dt",
"=",
"datetime",
",",
"constraint",
"=",
"constraint",
")"
] | Handle a TradingControlViolation, either by raising or logging an
error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`. | [
"Handle",
"a",
"TradingControlViolation",
"either",
"by",
"raising",
"or",
"logging",
"and",
"error",
"with",
"information",
"about",
"the",
"failure",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L79-L99 |
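The fail-vs-log policy in isolation, with ValueError and print standing in for TradingControlViolation and the logbook logger:

```python
def handle_violation(on_error, asset, amount, dt, constraint):
    message = ("Order for %s shares of %s at %s violates trading "
               "constraint %s" % (amount, asset, dt, constraint))
    if on_error == 'fail':
        raise ValueError(message)
    elif on_error == 'log':
        print(message)

handle_violation('log', 'AAPL', 100, '2016-01-04', 'MaxOrderSize')
try:
    handle_violation('fail', 'AAPL', 100, '2016-01-04', 'MaxOrderSize')
except ValueError as e:
    print('raised:', e)
```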
25,866 | quantopian/zipline | zipline/finance/controls.py | MaxOrderCount.validate | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.handle_violation(asset, amount, algo_datetime)
self.orders_placed += 1 | python | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.handle_violation(asset, amount, algo_datetime)
self.orders_placed += 1 | [
"def",
"validate",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"portfolio",
",",
"algo_datetime",
",",
"algo_current_data",
")",
":",
"algo_date",
"=",
"algo_datetime",
".",
"date",
"(",
")",
"# Reset order count if it's a new day.",
"if",
"self",
".",
"current_date",
"and",
"self",
".",
"current_date",
"!=",
"algo_date",
":",
"self",
".",
"orders_placed",
"=",
"0",
"self",
".",
"current_date",
"=",
"algo_date",
"if",
"self",
".",
"orders_placed",
">=",
"self",
".",
"max_count",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")",
"self",
".",
"orders_placed",
"+=",
"1"
] | Fail if we've already placed self.max_count orders today. | [
"Fail",
"if",
"we",
"ve",
"already",
"placed",
"self",
".",
"max_count",
"orders",
"today",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L119-L137 |
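The day-rollover bookkeeping above, isolated into a tiny counter that returns a bool instead of raising (plain datetimes, no zipline types):

```python
import datetime

class DailyOrderCounter:
    def __init__(self, max_count):
        self.max_count = max_count
        self.current_date = None
        self.orders_placed = 0

    def order_allowed(self, dt):
        if self.current_date is not None and self.current_date != dt.date():
            self.orders_placed = 0            # new day: reset the count
        self.current_date = dt.date()
        if self.orders_placed >= self.max_count:
            return False
        self.orders_placed += 1
        return True

counter = DailyOrderCounter(max_count=2)
day1 = datetime.datetime(2016, 1, 4, 9, 31)
print([counter.order_allowed(day1) for _ in range(3)])  # [True, True, False]
print(counter.order_allowed(datetime.datetime(2016, 1, 5, 9, 31)))  # True
```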
25,867 | quantopian/zipline | zipline/finance/controls.py | RestrictedListOrder.validate | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if self.restrictions.is_restricted(asset, algo_datetime):
self.handle_violation(asset, amount, algo_datetime) | python | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if self.restrictions.is_restricted(asset, algo_datetime):
self.handle_violation(asset, amount, algo_datetime) | [
"def",
"validate",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"portfolio",
",",
"algo_datetime",
",",
"algo_current_data",
")",
":",
"if",
"self",
".",
"restrictions",
".",
"is_restricted",
"(",
"asset",
",",
"algo_datetime",
")",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")"
] | Fail if the asset is in the restricted_list. | [
"Fail",
"if",
"the",
"asset",
"is",
"in",
"the",
"restricted_list",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L154-L164 |
25,868 | quantopian/zipline | zipline/finance/controls.py | MaxOrderSize.validate | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.handle_violation(asset, amount, algo_datetime)
current_asset_price = algo_current_data.current(asset, "price")
order_value = amount * current_asset_price
too_much_value = (self.max_notional is not None and
abs(order_value) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) | python | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.handle_violation(asset, amount, algo_datetime)
current_asset_price = algo_current_data.current(asset, "price")
order_value = amount * current_asset_price
too_much_value = (self.max_notional is not None and
abs(order_value) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) | [
"def",
"validate",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"portfolio",
",",
"algo_datetime",
",",
"algo_current_data",
")",
":",
"if",
"self",
".",
"asset",
"is",
"not",
"None",
"and",
"self",
".",
"asset",
"!=",
"asset",
":",
"return",
"if",
"self",
".",
"max_shares",
"is",
"not",
"None",
"and",
"abs",
"(",
"amount",
")",
">",
"self",
".",
"max_shares",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")",
"current_asset_price",
"=",
"algo_current_data",
".",
"current",
"(",
"asset",
",",
"\"price\"",
")",
"order_value",
"=",
"amount",
"*",
"current_asset_price",
"too_much_value",
"=",
"(",
"self",
".",
"max_notional",
"is",
"not",
"None",
"and",
"abs",
"(",
"order_value",
")",
">",
"self",
".",
"max_notional",
")",
"if",
"too_much_value",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")"
] | Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional. | [
"Fail",
"if",
"the",
"magnitude",
"of",
"the",
"given",
"order",
"exceeds",
"either",
"self",
".",
"max_shares",
"or",
"self",
".",
"max_notional",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L199-L223 |
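Both per-order limits as a pure predicate: an order violates when its share count exceeds `max_shares` or its dollar value exceeds `max_notional` (a limit of None means unchecked):

```python
def order_violates(amount, price, max_shares=None, max_notional=None):
    if max_shares is not None and abs(amount) > max_shares:
        return True
    if max_notional is not None and abs(amount * price) > max_notional:
        return True
    return False

print(order_violates(1000, 10.0, max_shares=500))      # True: 1000 > 500 shares
print(order_violates(100, 10.0, max_notional=500.0))   # True: $1000 > $500
print(order_violates(100, 10.0, max_shares=500,
                     max_notional=2000.0))             # False: within both limits
```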
25,869 | quantopian/zipline | zipline/finance/controls.py | MaxPositionSize.validate | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) | python | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) | [
"def",
"validate",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"portfolio",
",",
"algo_datetime",
",",
"algo_current_data",
")",
":",
"if",
"self",
".",
"asset",
"is",
"not",
"None",
"and",
"self",
".",
"asset",
"!=",
"asset",
":",
"return",
"current_share_count",
"=",
"portfolio",
".",
"positions",
"[",
"asset",
"]",
".",
"amount",
"shares_post_order",
"=",
"current_share_count",
"+",
"amount",
"too_many_shares",
"=",
"(",
"self",
".",
"max_shares",
"is",
"not",
"None",
"and",
"abs",
"(",
"shares_post_order",
")",
">",
"self",
".",
"max_shares",
")",
"if",
"too_many_shares",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")",
"current_price",
"=",
"algo_current_data",
".",
"current",
"(",
"asset",
",",
"\"price\"",
")",
"value_post_order",
"=",
"shares_post_order",
"*",
"current_price",
"too_much_value",
"=",
"(",
"self",
".",
"max_notional",
"is",
"not",
"None",
"and",
"abs",
"(",
"value_post_order",
")",
">",
"self",
".",
"max_notional",
")",
"if",
"too_much_value",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")"
] | Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional. | [
"Fail",
"if",
"the",
"given",
"order",
"would",
"cause",
"the",
"magnitude",
"of",
"our",
"position",
"to",
"be",
"greater",
"in",
"shares",
"than",
"self",
".",
"max_shares",
"or",
"greater",
"in",
"dollar",
"value",
"than",
"self",
".",
"max_notional",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L257-L287 |
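The same pair of limits, but applied to the position that would result from the order rather than to the order itself:

```python
def position_violates(current_shares, amount, price,
                      max_shares=None, max_notional=None):
    post_order = current_shares + amount
    if max_shares is not None and abs(post_order) > max_shares:
        return True
    if max_notional is not None and abs(post_order * price) > max_notional:
        return True
    return False

# Holding 450 shares, a 100-share buy breaches a 500-share position cap,
# even though the 100-share order alone would pass a per-order check.
print(position_violates(450, 100, 10.0, max_shares=500))  # True
```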
25,870 | quantopian/zipline | zipline/finance/controls.py | LongOnly.validate | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime) | python | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime) | [
"def",
"validate",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"portfolio",
",",
"algo_datetime",
",",
"algo_current_data",
")",
":",
"if",
"portfolio",
".",
"positions",
"[",
"asset",
"]",
".",
"amount",
"+",
"amount",
"<",
"0",
":",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
")"
] | Fail if we would hold negative shares of asset after completing this
order. | [
"Fail",
"if",
"we",
"would",
"hold",
"negative",
"shares",
"of",
"asset",
"after",
"completing",
"this",
"order",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L298-L309 |
25,871 | quantopian/zipline | zipline/finance/controls.py | AssetDateBounds.validate | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the algo has passed this Asset's end_date, or is before the
Asset's start date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata) | python | def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the algo has passed this Asset's end_date, or is before the
Asset's start date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata) | [
"def",
"validate",
"(",
"self",
",",
"asset",
",",
"amount",
",",
"portfolio",
",",
"algo_datetime",
",",
"algo_current_data",
")",
":",
"# If the order is for 0 shares, then silently pass through.",
"if",
"amount",
"==",
"0",
":",
"return",
"normalized_algo_dt",
"=",
"pd",
".",
"Timestamp",
"(",
"algo_datetime",
")",
".",
"normalize",
"(",
")",
"# Fail if the algo is before this Asset's start_date",
"if",
"asset",
".",
"start_date",
":",
"normalized_start",
"=",
"pd",
".",
"Timestamp",
"(",
"asset",
".",
"start_date",
")",
".",
"normalize",
"(",
")",
"if",
"normalized_algo_dt",
"<",
"normalized_start",
":",
"metadata",
"=",
"{",
"'asset_start_date'",
":",
"normalized_start",
"}",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
",",
"metadata",
"=",
"metadata",
")",
"# Fail if the algo has passed this Asset's end_date",
"if",
"asset",
".",
"end_date",
":",
"normalized_end",
"=",
"pd",
".",
"Timestamp",
"(",
"asset",
".",
"end_date",
")",
".",
"normalize",
"(",
")",
"if",
"normalized_algo_dt",
">",
"normalized_end",
":",
"metadata",
"=",
"{",
"'asset_end_date'",
":",
"normalized_end",
"}",
"self",
".",
"handle_violation",
"(",
"asset",
",",
"amount",
",",
"algo_datetime",
",",
"metadata",
"=",
"metadata",
")"
] | Fail if the algo has passed this Asset's end_date, or is before the
Asset's start date. | [
"Fail",
"if",
"the",
"algo",
"has",
"passed",
"this",
"Asset",
"s",
"end_date",
"or",
"before",
"the",
"Asset",
"s",
"start",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L321-L354 |
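The bounds comparison hinges on `pd.Timestamp.normalize()`, which drops the time of day so an intraday datetime compares as its session date:

```python
import pandas as pd

algo_dt = pd.Timestamp("2016-01-04 15:45")
start = pd.Timestamp("2016-01-04")
end = pd.Timestamp("2016-06-30")

norm = algo_dt.normalize()          # 2016-01-04 00:00:00
print(norm < start.normalize())     # False: trading on the start date is allowed
print(norm > end.normalize())       # False: still inside the bounds
```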
25,872 | quantopian/zipline | zipline/finance/controls.py | MaxLeverage.validate | def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account.leverage > self.max_leverage:
self.fail() | python | def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account.leverage > self.max_leverage:
self.fail() | [
"def",
"validate",
"(",
"self",
",",
"_portfolio",
",",
"_account",
",",
"_algo_datetime",
",",
"_algo_current_data",
")",
":",
"if",
"_account",
".",
"leverage",
">",
"self",
".",
"max_leverage",
":",
"self",
".",
"fail",
"(",
")"
] | Fail if the leverage is greater than the allowed leverage. | [
"Fail",
"if",
"the",
"leverage",
"is",
"greater",
"than",
"the",
"allowed",
"leverage",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L424-L433 |
25,873 | quantopian/zipline | zipline/finance/controls.py | MinLeverage.validate | def validate(self,
_portfolio,
account,
algo_datetime,
_algo_current_data):
"""
Perform validation checks if we are past the deadline.
Fail if the leverage is less than the min leverage.
"""
if (algo_datetime > self.deadline and
account.leverage < self.min_leverage):
self.fail() | python | def validate(self,
_portfolio,
account,
algo_datetime,
_algo_current_data):
"""
Perform validation checks if we are past the deadline.
Fail if the leverage is less than the min leverage.
"""
if (algo_datetime > self.deadline and
account.leverage < self.min_leverage):
self.fail() | [
"def",
"validate",
"(",
"self",
",",
"_portfolio",
",",
"account",
",",
"algo_datetime",
",",
"_algo_current_data",
")",
":",
"if",
"(",
"algo_datetime",
">",
"self",
".",
"deadline",
"and",
"account",
".",
"leverage",
"<",
"self",
".",
"min_leverage",
")",
":",
"self",
".",
"fail",
"(",
")"
] | Perform validation checks if we are past the deadline.
Fail if the leverage is less than the min leverage. | [
"Make",
"validation",
"checks",
"if",
"we",
"are",
"after",
"the",
"deadline",
".",
"Fail",
"if",
"the",
"leverage",
"is",
"less",
"than",
"the",
"min",
"leverage",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L463-L474 |
25,874 | quantopian/zipline | zipline/assets/asset_db_migrations.py | alter_columns | def alter_columns(op, name, *columns, **kwargs):
"""Alter columns from a table.
Parameters
----------
name : str
The name of the table.
*columns
The new columns to have.
selection_string : str, optional
The string to use in the selection. If not provided, it will select all
of the new columns from the old table.
Notes
-----
The columns are passed explicitly because this should only be used in a
downgrade where ``zipline.assets.asset_db_schema`` could change.
"""
selection_string = kwargs.pop('selection_string', None)
if kwargs:
raise TypeError(
'alter_columns received extra arguments: %r' % sorted(kwargs),
)
if selection_string is None:
selection_string = ', '.join(column.name for column in columns)
tmp_name = '_alter_columns_' + name
op.rename_table(name, tmp_name)
for column in columns:
# Clear any indices that already exist on this table, otherwise we will
# fail to create the table because the indices will already be present.
# When we create the table below, the indices that we want to preserve
# will just get recreated.
for table in name, tmp_name:
try:
op.drop_index('ix_%s_%s' % (table, column.name))
except sa.exc.OperationalError:
pass
op.create_table(name, *columns)
op.execute(
'insert into %s select %s from %s' % (
name,
selection_string,
tmp_name,
),
)
op.drop_table(tmp_name) | python | def alter_columns(op, name, *columns, **kwargs):
"""Alter columns from a table.
Parameters
----------
name : str
The name of the table.
*columns
The new columns to have.
selection_string : str, optional
The string to use in the selection. If not provided, it will select all
of the new columns from the old table.
Notes
-----
The columns are passed explicitly because this should only be used in a
downgrade where ``zipline.assets.asset_db_schema`` could change.
"""
selection_string = kwargs.pop('selection_string', None)
if kwargs:
raise TypeError(
'alter_columns received extra arguments: %r' % sorted(kwargs),
)
if selection_string is None:
selection_string = ', '.join(column.name for column in columns)
tmp_name = '_alter_columns_' + name
op.rename_table(name, tmp_name)
for column in columns:
# Clear any indices that already exist on this table, otherwise we will
# fail to create the table because the indices will already be present.
# When we create the table below, the indices that we want to preserve
# will just get recreated.
for table in name, tmp_name:
try:
op.drop_index('ix_%s_%s' % (table, column.name))
except sa.exc.OperationalError:
pass
op.create_table(name, *columns)
op.execute(
'insert into %s select %s from %s' % (
name,
selection_string,
tmp_name,
),
)
op.drop_table(tmp_name) | [
"def",
"alter_columns",
"(",
"op",
",",
"name",
",",
"*",
"columns",
",",
"*",
"*",
"kwargs",
")",
":",
"selection_string",
"=",
"kwargs",
".",
"pop",
"(",
"'selection_string'",
",",
"None",
")",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'alter_columns received extra arguments: %r'",
"%",
"sorted",
"(",
"kwargs",
")",
",",
")",
"if",
"selection_string",
"is",
"None",
":",
"selection_string",
"=",
"', '",
".",
"join",
"(",
"column",
".",
"name",
"for",
"column",
"in",
"columns",
")",
"tmp_name",
"=",
"'_alter_columns_'",
"+",
"name",
"op",
".",
"rename_table",
"(",
"name",
",",
"tmp_name",
")",
"for",
"column",
"in",
"columns",
":",
"# Clear any indices that already exist on this table, otherwise we will",
"# fail to create the table because the indices will already be present.",
"# When we create the table below, the indices that we want to preserve",
"# will just get recreated.",
"for",
"table",
"in",
"name",
",",
"tmp_name",
":",
"try",
":",
"op",
".",
"drop_index",
"(",
"'ix_%s_%s'",
"%",
"(",
"table",
",",
"column",
".",
"name",
")",
")",
"except",
"sa",
".",
"exc",
".",
"OperationalError",
":",
"pass",
"op",
".",
"create_table",
"(",
"name",
",",
"*",
"columns",
")",
"op",
".",
"execute",
"(",
"'insert into %s select %s from %s'",
"%",
"(",
"name",
",",
"selection_string",
",",
"tmp_name",
",",
")",
",",
")",
"op",
".",
"drop_table",
"(",
"tmp_name",
")"
] | Alter columns from a table.
Parameters
----------
name : str
The name of the table.
*columns
The new columns to have.
selection_string : str, optional
The string to use in the selection. If not provided, it will select all
of the new columns from the old table.
Notes
-----
The columns are passed explicitly because this should only be used in a
downgrade where ``zipline.assets.asset_db_schema`` could change. | [
"Alter",
"columns",
"from",
"a",
"table",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L13-L61 |
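A sketch of how a migration might call alter_columns; the database path and column list are illustrative, not taken from the source. The rename/create/copy/drop cycle is what makes column changes possible on SQLite, which cannot alter column types in place.

import sqlalchemy as sa
from alembic.migration import MigrationContext
from alembic.operations import Operations

engine = sa.create_engine('sqlite:///assets.db')  # illustrative path
with engine.begin() as conn:
    op = Operations(MigrationContext.configure(conn))
    # Rebuild 'version_info' with an explicit column list; rows are
    # copied back through the '_alter_columns_version_info' temp table.
    alter_columns(
        op,
        'version_info',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('version', sa.Integer, nullable=False),
        selection_string='id, version',  # optional cast/rename hook
    )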
25,875 | quantopian/zipline | zipline/assets/asset_db_migrations.py | downgrade | def downgrade(engine, desired_version):
"""Downgrades the assets db at the given engine to the desired version.
Parameters
----------
engine : Engine
An SQLAlchemy engine to the assets database.
desired_version : int
The desired resulting version for the assets database.
"""
# Check the version of the db at the engine
with engine.begin() as conn:
metadata = sa.MetaData(conn)
metadata.reflect()
version_info_table = metadata.tables['version_info']
starting_version = sa.select((version_info_table.c.version,)).scalar()
# Check for accidental upgrade
if starting_version < desired_version:
raise AssetDBImpossibleDowngrade(db_version=starting_version,
desired_version=desired_version)
# Check if the desired version is already the db version
if starting_version == desired_version:
# No downgrade needed
return
# Create alembic context
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
# Integer keys of downgrades to run
# E.g.: [5, 4, 3, 2] would downgrade v6 to v2
downgrade_keys = range(desired_version, starting_version)[::-1]
# Disable foreign keys until all downgrades are complete
_pragma_foreign_keys(conn, False)
# Execute the downgrades in order
for downgrade_key in downgrade_keys:
_downgrade_methods[downgrade_key](op, conn, version_info_table)
# Re-enable foreign keys
_pragma_foreign_keys(conn, True) | python | def downgrade(engine, desired_version):
"""Downgrades the assets db at the given engine to the desired version.
Parameters
----------
engine : Engine
An SQLAlchemy engine to the assets database.
desired_version : int
The desired resulting version for the assets database.
"""
# Check the version of the db at the engine
with engine.begin() as conn:
metadata = sa.MetaData(conn)
metadata.reflect()
version_info_table = metadata.tables['version_info']
starting_version = sa.select((version_info_table.c.version,)).scalar()
# Check for accidental upgrade
if starting_version < desired_version:
raise AssetDBImpossibleDowngrade(db_version=starting_version,
desired_version=desired_version)
# Check if the desired version is already the db version
if starting_version == desired_version:
# No downgrade needed
return
# Create alembic context
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
# Integer keys of downgrades to run
# E.g.: [5, 4, 3, 2] would downgrade v6 to v2
downgrade_keys = range(desired_version, starting_version)[::-1]
# Disable foreign keys until all downgrades are complete
_pragma_foreign_keys(conn, False)
# Execute the downgrades in order
for downgrade_key in downgrade_keys:
_downgrade_methods[downgrade_key](op, conn, version_info_table)
# Re-enable foreign keys
_pragma_foreign_keys(conn, True) | [
"def",
"downgrade",
"(",
"engine",
",",
"desired_version",
")",
":",
"# Check the version of the db at the engine",
"with",
"engine",
".",
"begin",
"(",
")",
"as",
"conn",
":",
"metadata",
"=",
"sa",
".",
"MetaData",
"(",
"conn",
")",
"metadata",
".",
"reflect",
"(",
")",
"version_info_table",
"=",
"metadata",
".",
"tables",
"[",
"'version_info'",
"]",
"starting_version",
"=",
"sa",
".",
"select",
"(",
"(",
"version_info_table",
".",
"c",
".",
"version",
",",
")",
")",
".",
"scalar",
"(",
")",
"# Check for accidental upgrade",
"if",
"starting_version",
"<",
"desired_version",
":",
"raise",
"AssetDBImpossibleDowngrade",
"(",
"db_version",
"=",
"starting_version",
",",
"desired_version",
"=",
"desired_version",
")",
"# Check if the desired version is already the db version",
"if",
"starting_version",
"==",
"desired_version",
":",
"# No downgrade needed",
"return",
"# Create alembic context",
"ctx",
"=",
"MigrationContext",
".",
"configure",
"(",
"conn",
")",
"op",
"=",
"Operations",
"(",
"ctx",
")",
"# Integer keys of downgrades to run",
"# E.g.: [5, 4, 3, 2] would downgrade v6 to v2",
"downgrade_keys",
"=",
"range",
"(",
"desired_version",
",",
"starting_version",
")",
"[",
":",
":",
"-",
"1",
"]",
"# Disable foreign keys until all downgrades are complete",
"_pragma_foreign_keys",
"(",
"conn",
",",
"False",
")",
"# Execute the downgrades in order",
"for",
"downgrade_key",
"in",
"downgrade_keys",
":",
"_downgrade_methods",
"[",
"downgrade_key",
"]",
"(",
"op",
",",
"conn",
",",
"version_info_table",
")",
"# Re-enable foreign keys",
"_pragma_foreign_keys",
"(",
"conn",
",",
"True",
")"
] | Downgrades the assets db at the given engine to the desired version.
Parameters
----------
engine : Engine
An SQLAlchemy engine to the assets database.
desired_version : int
The desired resulting version for the assets database. | [
"Downgrades",
"the",
"assets",
"db",
"at",
"the",
"given",
"engine",
"to",
"the",
"desired",
"version",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L65-L109 |
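A short usage sketch (the file name is illustrative): the call is a no-op when the stored version already matches, and raises AssetDBImpossibleDowngrade if the target is newer than what is on disk.

import sqlalchemy as sa

# Rewrite an assets database written at a newer schema version so an
# older zipline release can read it; runs each registered step in order.
engine = sa.create_engine('sqlite:///assets-7.sqlite')  # illustrative
downgrade(engine, desired_version=4)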
25,876 | quantopian/zipline | zipline/assets/asset_db_migrations.py | downgrades | def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
"""
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _ | python | def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
"""
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _ | [
"def",
"downgrades",
"(",
"src",
")",
":",
"def",
"_",
"(",
"f",
")",
":",
"destination",
"=",
"src",
"-",
"1",
"@",
"do",
"(",
"operator",
".",
"setitem",
"(",
"_downgrade_methods",
",",
"destination",
")",
")",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"op",
",",
"conn",
",",
"version_info_table",
")",
":",
"conn",
".",
"execute",
"(",
"version_info_table",
".",
"delete",
"(",
")",
")",
"# clear the version",
"f",
"(",
"op",
")",
"write_version_info",
"(",
"conn",
",",
"version_info_table",
",",
"destination",
")",
"return",
"wrapper",
"return",
"_"
] | Decorator for marking that a method is a downgrade from a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply. | [
"Decorator",
"for",
"marking",
"that",
"a",
"method",
"is",
"a",
"downgrade",
"to",
"a",
"version",
"to",
"the",
"previous",
"version",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L133-L158 |
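Registering a new step is then a one-decorator affair. The migration body below is hypothetical, including the column it drops; the wrapper clears version_info, runs the body against the alembic Operations object, and records the destination version.

@downgrades(8)  # hypothetical: registers under destination key 7
def _downgrade_v8(op):
    # Only schema work belongs here; the version bookkeeping around it
    # is handled by the decorator's wrapper.
    op.drop_column('equities', 'hypothetical_column')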
25,877 | quantopian/zipline | zipline/assets/asset_db_migrations.py | _downgrade_v1 | def _downgrade_v1(op):
"""
Downgrade assets db by removing the 'tick_size' column and renaming the
'multiplier' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_futures_contracts_root_symbol')
op.drop_index('ix_futures_contracts_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('futures_contracts') as batch_op:
# Rename 'multiplier'
batch_op.alter_column(column_name='multiplier',
new_column_name='contract_multiplier')
# Delete 'tick_size'
batch_op.drop_column('tick_size')
# Recreate indices after batch
op.create_index('ix_futures_contracts_root_symbol',
table_name='futures_contracts',
columns=['root_symbol'])
op.create_index('ix_futures_contracts_symbol',
table_name='futures_contracts',
columns=['symbol'],
unique=True) | python | def _downgrade_v1(op):
"""
Downgrade assets db by removing the 'tick_size' column and renaming the
'multiplier' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_futures_contracts_root_symbol')
op.drop_index('ix_futures_contracts_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('futures_contracts') as batch_op:
# Rename 'multiplier'
batch_op.alter_column(column_name='multiplier',
new_column_name='contract_multiplier')
# Delete 'tick_size'
batch_op.drop_column('tick_size')
# Recreate indices after batch
op.create_index('ix_futures_contracts_root_symbol',
table_name='futures_contracts',
columns=['root_symbol'])
op.create_index('ix_futures_contracts_symbol',
table_name='futures_contracts',
columns=['symbol'],
unique=True) | [
"def",
"_downgrade_v1",
"(",
"op",
")",
":",
"# Drop indices before batch",
"# This is to prevent index collision when creating the temp table",
"op",
".",
"drop_index",
"(",
"'ix_futures_contracts_root_symbol'",
")",
"op",
".",
"drop_index",
"(",
"'ix_futures_contracts_symbol'",
")",
"# Execute batch op to allow column modification in SQLite",
"with",
"op",
".",
"batch_alter_table",
"(",
"'futures_contracts'",
")",
"as",
"batch_op",
":",
"# Rename 'multiplier'",
"batch_op",
".",
"alter_column",
"(",
"column_name",
"=",
"'multiplier'",
",",
"new_column_name",
"=",
"'contract_multiplier'",
")",
"# Delete 'tick_size'",
"batch_op",
".",
"drop_column",
"(",
"'tick_size'",
")",
"# Recreate indices after batch",
"op",
".",
"create_index",
"(",
"'ix_futures_contracts_root_symbol'",
",",
"table_name",
"=",
"'futures_contracts'",
",",
"columns",
"=",
"[",
"'root_symbol'",
"]",
")",
"op",
".",
"create_index",
"(",
"'ix_futures_contracts_symbol'",
",",
"table_name",
"=",
"'futures_contracts'",
",",
"columns",
"=",
"[",
"'symbol'",
"]",
",",
"unique",
"=",
"True",
")"
] | Downgrade assets db by removing the 'tick_size' column and renaming the
'multiplier' column. | [
"Downgrade",
"assets",
"db",
"by",
"removing",
"the",
"tick_size",
"column",
"and",
"renaming",
"the",
"multiplier",
"column",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L162-L189 |
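The drop-indices / batch-alter / recreate-indices shape recurs through this _downgrade_v* family because SQLite column changes copy data through a temp table, and that copy fails if an index name already exists. A sketch of the same pattern outside zipline, with illustrative table and index names:

import sqlalchemy as sa
from alembic.migration import MigrationContext
from alembic.operations import Operations

engine = sa.create_engine('sqlite:///example.db')  # illustrative
with engine.begin() as conn:
    op = Operations(MigrationContext.configure(conn))
    op.drop_index('ix_trades_symbol')  # clear first to avoid a name clash
    with op.batch_alter_table('trades') as batch_op:
        # batch_alter_table copies 'trades' through a temp table,
        # which is the only way SQLite can rename a column.
        batch_op.alter_column('qty', new_column_name='quantity')
    op.create_index('ix_trades_symbol', 'trades', ['symbol'])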
25,878 | quantopian/zipline | zipline/assets/asset_db_migrations.py | _downgrade_v2 | def _downgrade_v2(op):
"""
Downgrade assets db by removing the 'auto_close_date' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('auto_close_date')
# Recreate indices after batch
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol']) | python | def _downgrade_v2(op):
"""
Downgrade assets db by removing the 'auto_close_date' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('auto_close_date')
# Recreate indices after batch
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol']) | [
"def",
"_downgrade_v2",
"(",
"op",
")",
":",
"# Drop indices before batch",
"# This is to prevent index collision when creating the temp table",
"op",
".",
"drop_index",
"(",
"'ix_equities_fuzzy_symbol'",
")",
"op",
".",
"drop_index",
"(",
"'ix_equities_company_symbol'",
")",
"# Execute batch op to allow column modification in SQLite",
"with",
"op",
".",
"batch_alter_table",
"(",
"'equities'",
")",
"as",
"batch_op",
":",
"batch_op",
".",
"drop_column",
"(",
"'auto_close_date'",
")",
"# Recreate indices after batch",
"op",
".",
"create_index",
"(",
"'ix_equities_fuzzy_symbol'",
",",
"table_name",
"=",
"'equities'",
",",
"columns",
"=",
"[",
"'fuzzy_symbol'",
"]",
")",
"op",
".",
"create_index",
"(",
"'ix_equities_company_symbol'",
",",
"table_name",
"=",
"'equities'",
",",
"columns",
"=",
"[",
"'company_symbol'",
"]",
")"
] | Downgrade assets db by removing the 'auto_close_date' column. | [
"Downgrade",
"assets",
"db",
"by",
"removing",
"the",
"auto_close_date",
"column",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L193-L212 |
25,879 | quantopian/zipline | zipline/assets/asset_db_migrations.py | _downgrade_v3 | def _downgrade_v3(op):
"""
Downgrade assets db by adding a not null constraint on
``equities.first_traded``
"""
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
)
op.execute(
"""
insert into _new_equities
select * from equities
where equities.first_traded is not null
""",
)
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
# we need to make sure the indices have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
) | python | def _downgrade_v3(op):
"""
Downgrade assets db by adding a not null constraint on
``equities.first_traded``
"""
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
)
op.execute(
"""
insert into _new_equities
select * from equities
where equities.first_traded is not null
""",
)
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
# we need to make sure the indices have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
) | [
"def",
"_downgrade_v3",
"(",
"op",
")",
":",
"op",
".",
"create_table",
"(",
"'_new_equities'",
",",
"sa",
".",
"Column",
"(",
"'sid'",
",",
"sa",
".",
"Integer",
",",
"unique",
"=",
"True",
",",
"nullable",
"=",
"False",
",",
"primary_key",
"=",
"True",
",",
")",
",",
"sa",
".",
"Column",
"(",
"'symbol'",
",",
"sa",
".",
"Text",
")",
",",
"sa",
".",
"Column",
"(",
"'company_symbol'",
",",
"sa",
".",
"Text",
")",
",",
"sa",
".",
"Column",
"(",
"'share_class_symbol'",
",",
"sa",
".",
"Text",
")",
",",
"sa",
".",
"Column",
"(",
"'fuzzy_symbol'",
",",
"sa",
".",
"Text",
")",
",",
"sa",
".",
"Column",
"(",
"'asset_name'",
",",
"sa",
".",
"Text",
")",
",",
"sa",
".",
"Column",
"(",
"'start_date'",
",",
"sa",
".",
"Integer",
",",
"default",
"=",
"0",
",",
"nullable",
"=",
"False",
")",
",",
"sa",
".",
"Column",
"(",
"'end_date'",
",",
"sa",
".",
"Integer",
",",
"nullable",
"=",
"False",
")",
",",
"sa",
".",
"Column",
"(",
"'first_traded'",
",",
"sa",
".",
"Integer",
",",
"nullable",
"=",
"False",
")",
",",
"sa",
".",
"Column",
"(",
"'auto_close_date'",
",",
"sa",
".",
"Integer",
")",
",",
"sa",
".",
"Column",
"(",
"'exchange'",
",",
"sa",
".",
"Text",
")",
",",
")",
"op",
".",
"execute",
"(",
"\"\"\"\n insert into _new_equities\n select * from equities\n where equities.first_traded is not null\n \"\"\"",
",",
")",
"op",
".",
"drop_table",
"(",
"'equities'",
")",
"op",
".",
"rename_table",
"(",
"'_new_equities'",
",",
"'equities'",
")",
"# we need to make sure the indices have the proper names after the rename",
"op",
".",
"create_index",
"(",
"'ix_equities_company_symbol'",
",",
"'equities'",
",",
"[",
"'company_symbol'",
"]",
",",
")",
"op",
".",
"create_index",
"(",
"'ix_equities_fuzzy_symbol'",
",",
"'equities'",
",",
"[",
"'fuzzy_symbol'",
"]",
",",
")"
] | Downgrade assets db by adding a not null constraint on
``equities.first_traded`` | [
"Downgrade",
"assets",
"db",
"by",
"adding",
"a",
"not",
"null",
"constraint",
"on",
"equities",
".",
"first_traded"
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L216-L260 |
25,880 | quantopian/zipline | zipline/assets/asset_db_migrations.py | _downgrade_v4 | def _downgrade_v4(op):
"""
Downgrades assets db by copying the `exchange_full` column to `exchange`,
then dropping the `exchange_full` column.
"""
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
op.execute("UPDATE equities SET exchange = exchange_full")
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('exchange_full')
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol']) | python | def _downgrade_v4(op):
"""
Downgrades assets db by copying the `exchange_full` column to `exchange`,
then dropping the `exchange_full` column.
"""
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
op.execute("UPDATE equities SET exchange = exchange_full")
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('exchange_full')
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol']) | [
"def",
"_downgrade_v4",
"(",
"op",
")",
":",
"op",
".",
"drop_index",
"(",
"'ix_equities_fuzzy_symbol'",
")",
"op",
".",
"drop_index",
"(",
"'ix_equities_company_symbol'",
")",
"op",
".",
"execute",
"(",
"\"UPDATE equities SET exchange = exchange_full\"",
")",
"with",
"op",
".",
"batch_alter_table",
"(",
"'equities'",
")",
"as",
"batch_op",
":",
"batch_op",
".",
"drop_column",
"(",
"'exchange_full'",
")",
"op",
".",
"create_index",
"(",
"'ix_equities_fuzzy_symbol'",
",",
"table_name",
"=",
"'equities'",
",",
"columns",
"=",
"[",
"'fuzzy_symbol'",
"]",
")",
"op",
".",
"create_index",
"(",
"'ix_equities_company_symbol'",
",",
"table_name",
"=",
"'equities'",
",",
"columns",
"=",
"[",
"'company_symbol'",
"]",
")"
] | Downgrades assets db by copying the `exchange_full` column to `exchange`,
then dropping the `exchange_full` column. | [
"Downgrades",
"assets",
"db",
"by",
"copying",
"the",
"exchange_full",
"column",
"to",
"exchange",
"then",
"dropping",
"the",
"exchange_full",
"column",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L264-L282 |
25,881 | quantopian/zipline | zipline/finance/metrics/core.py | _make_metrics_set_core | def _make_metrics_set_core():
"""Create a family of metrics sets functions that read from the same
metrics set mapping.
Returns
-------
metrics_sets : mappingproxy
The mapping of metrics sets to load functions.
register : callable
The function which registers new metrics sets in the ``metrics_sets``
mapping.
unregister : callable
The function which deregisters metrics sets from the ``metrics_sets``
mapping.
load : callable
The function which loads the ingested metrics sets back into memory.
"""
_metrics_sets = {}
# Expose _metrics_sets through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another metrics set.
metrics_sets = mappingproxy(_metrics_sets)
def register(name, function=None):
"""Register a new metrics set.
Parameters
----------
name : str
The name of the metrics set
function : callable
The callable which produces the metrics set.
Notes
-----
This may be used as a decorator if only ``name`` is passed.
See Also
--------
zipline.finance.metrics.get_metrics_set
zipline.finance.metrics.unregister_metrics_set
"""
if function is None:
# allow as decorator with just name.
return partial(register, name)
if name in _metrics_sets:
raise ValueError('metrics set %r is already registered' % name)
_metrics_sets[name] = function
return function
def unregister(name):
"""Unregister an existing metrics set.
Parameters
----------
name : str
The name of the metrics set
See Also
--------
zipline.finance.metrics.register_metrics_set
"""
try:
del _metrics_sets[name]
except KeyError:
raise ValueError(
'metrics set %r was not already registered' % name,
)
def load(name):
"""Return an instance of the metrics set registered with the given name.
Returns
-------
metrics : set[Metric]
A new instance of the metrics set.
Raises
------
ValueError
Raised when no metrics set is registered to ``name``
"""
try:
function = _metrics_sets[name]
except KeyError:
raise ValueError(
'no metrics set registered as %r, options are: %r' % (
name,
sorted(_metrics_sets),
),
)
return function()
return metrics_sets, register, unregister, load | python | def _make_metrics_set_core():
"""Create a family of metrics sets functions that read from the same
metrics set mapping.
Returns
-------
metrics_sets : mappingproxy
The mapping of metrics sets to load functions.
register : callable
The function which registers new metrics sets in the ``metrics_sets``
mapping.
unregister : callable
The function which deregisters metrics sets from the ``metrics_sets``
mapping.
load : callable
The function which loads the ingested metrics sets back into memory.
"""
_metrics_sets = {}
# Expose _metrics_sets through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another metrics set.
metrics_sets = mappingproxy(_metrics_sets)
def register(name, function=None):
"""Register a new metrics set.
Parameters
----------
name : str
The name of the metrics set
function : callable
The callable which produces the metrics set.
Notes
-----
This may be used as a decorator if only ``name`` is passed.
See Also
--------
zipline.finance.metrics.get_metrics_set
zipline.finance.metrics.unregister_metrics_set
"""
if function is None:
# allow as decorator with just name.
return partial(register, name)
if name in _metrics_sets:
raise ValueError('metrics set %r is already registered' % name)
_metrics_sets[name] = function
return function
def unregister(name):
"""Unregister an existing metrics set.
Parameters
----------
name : str
The name of the metrics set
See Also
--------
zipline.finance.metrics.register_metrics_set
"""
try:
del _metrics_sets[name]
except KeyError:
raise ValueError(
'metrics set %r was not already registered' % name,
)
def load(name):
"""Return an instance of the metrics set registered with the given name.
Returns
-------
metrics : set[Metric]
A new instance of the metrics set.
Raises
------
ValueError
Raised when no metrics set is registered to ``name``
"""
try:
function = _metrics_sets[name]
except KeyError:
raise ValueError(
'no metrics set registered as %r, options are: %r' % (
name,
sorted(_metrics_sets),
),
)
return function()
return metrics_sets, register, unregister, load | [
"def",
"_make_metrics_set_core",
"(",
")",
":",
"_metrics_sets",
"=",
"{",
"}",
"# Expose _metrics_sets through a proxy so that users cannot mutate this",
"# accidentally. Users may go through `register` to update this which will",
"# warn when trampling another metrics set.",
"metrics_sets",
"=",
"mappingproxy",
"(",
"_metrics_sets",
")",
"def",
"register",
"(",
"name",
",",
"function",
"=",
"None",
")",
":",
"\"\"\"Register a new metrics set.\n\n Parameters\n ----------\n name : str\n The name of the metrics set\n function : callable\n The callable which produces the metrics set.\n\n Notes\n -----\n This may be used as a decorator if only ``name`` is passed.\n\n See Also\n --------\n zipline.finance.metrics.get_metrics_set\n zipline.finance.metrics.unregister_metrics_set\n \"\"\"",
"if",
"function",
"is",
"None",
":",
"# allow as decorator with just name.",
"return",
"partial",
"(",
"register",
",",
"name",
")",
"if",
"name",
"in",
"_metrics_sets",
":",
"raise",
"ValueError",
"(",
"'metrics set %r is already registered'",
"%",
"name",
")",
"_metrics_sets",
"[",
"name",
"]",
"=",
"function",
"return",
"function",
"def",
"unregister",
"(",
"name",
")",
":",
"\"\"\"Unregister an existing metrics set.\n\n Parameters\n ----------\n name : str\n The name of the metrics set\n\n See Also\n --------\n zipline.finance.metrics.register_metrics_set\n \"\"\"",
"try",
":",
"del",
"_metrics_sets",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'metrics set %r was not already registered'",
"%",
"name",
",",
")",
"def",
"load",
"(",
"name",
")",
":",
"\"\"\"Return an instance of the metrics set registered with the given name.\n\n Returns\n -------\n metrics : set[Metric]\n A new instance of the metrics set.\n\n Raises\n ------\n ValueError\n Raised when no metrics set is registered to ``name``\n \"\"\"",
"try",
":",
"function",
"=",
"_metrics_sets",
"[",
"name",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'no metrics set registered as %r, options are: %r'",
"%",
"(",
"name",
",",
"sorted",
"(",
"_metrics_sets",
")",
",",
")",
",",
")",
"return",
"function",
"(",
")",
"return",
"metrics_sets",
",",
"register",
",",
"unregister",
",",
"load"
] | Create a family of metrics sets functions that read from the same
metrics set mapping.
Returns
-------
metrics_sets : mappingproxy
The mapping of metrics sets to load functions.
register : callable
The function which registers new metrics sets in the ``metrics_sets``
mapping.
unregister : callable
The function which deregisters metrics sets from the ``metrics_sets``
mapping.
load : callable
The function which loads the ingested metrics sets back into memory. | [
"Create",
"a",
"family",
"of",
"metrics",
"sets",
"functions",
"that",
"read",
"from",
"the",
"same",
"metrics",
"set",
"mapping",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/core.py#L6-L103 |
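The four return values are unpacked at module scope into zipline's public registry functions; the same round trip sketched directly against the factory, with an illustrative set name:

metrics_sets, register, unregister, load = _make_metrics_set_core()


@register('illustrative-empty')  # decorator form: only the name is passed
def empty_metrics_set():
    # A metrics set factory returns a fresh set of Metric objects.
    return set()


assert 'illustrative-empty' in metrics_sets  # read-only mappingproxy view
assert load('illustrative-empty') == set()   # load() calls the factory
unregister('illustrative-empty')             # ValueError to load it now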
25,882 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | validate_column_specs | def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) | python | def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) | [
"def",
"validate_column_specs",
"(",
"events",
",",
"columns",
")",
":",
"required",
"=",
"required_estimates_fields",
"(",
"columns",
")",
"received",
"=",
"set",
"(",
"events",
".",
"columns",
")",
"missing",
"=",
"required",
"-",
"received",
"if",
"missing",
":",
"raise",
"ValueError",
"(",
"\"EarningsEstimatesLoader missing required columns {missing}.\\n\"",
"\"Got Columns: {received}\\n\"",
"\"Expected Columns: {required}\"",
".",
"format",
"(",
"missing",
"=",
"sorted",
"(",
"missing",
")",
",",
"received",
"=",
"sorted",
"(",
"received",
")",
",",
"required",
"=",
"sorted",
"(",
"required",
")",
",",
")",
")"
] | Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`. | [
"Verify",
"that",
"the",
"columns",
"of",
"events",
"can",
"be",
"used",
"by",
"a",
"EarningsEstimatesLoader",
"to",
"serve",
"the",
"BoundColumns",
"described",
"by",
"columns",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L74-L92 |
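The check itself is a plain set difference. A self-contained sketch of the same pattern without the zipline column machinery; the required names are illustrative:

import pandas as pd

required = {'sid', 'event_date', 'fiscal_year', 'fiscal_quarter'}
events = pd.DataFrame(columns=['sid', 'event_date'])  # illustrative frame

missing = required - set(events.columns)
if missing:
    # Mirrors the ValueError text above: report missing, got, expected.
    print('would raise: missing columns', sorted(missing))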
25,883 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.get_requested_quarter_data | def get_requested_quarter_data(self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
The number of announcements out that the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
"""
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
split_normalized_quarters(
requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
)
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates) | python | def get_requested_quarter_data(self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
The number of announcements out that the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
"""
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
split_normalized_quarters(
requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
)
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates) | [
"def",
"get_requested_quarter_data",
"(",
"self",
",",
"zero_qtr_data",
",",
"zeroth_quarter_idx",
",",
"stacked_last_per_qtr",
",",
"num_announcements",
",",
"dates",
")",
":",
"zero_qtr_data_idx",
"=",
"zero_qtr_data",
".",
"index",
"requested_qtr_idx",
"=",
"pd",
".",
"MultiIndex",
".",
"from_arrays",
"(",
"[",
"zero_qtr_data_idx",
".",
"get_level_values",
"(",
"0",
")",
",",
"zero_qtr_data_idx",
".",
"get_level_values",
"(",
"1",
")",
",",
"self",
".",
"get_shifted_qtrs",
"(",
"zeroth_quarter_idx",
".",
"get_level_values",
"(",
"NORMALIZED_QUARTERS",
",",
")",
",",
"num_announcements",
",",
")",
",",
"]",
",",
"names",
"=",
"[",
"zero_qtr_data_idx",
".",
"names",
"[",
"0",
"]",
",",
"zero_qtr_data_idx",
".",
"names",
"[",
"1",
"]",
",",
"SHIFTED_NORMALIZED_QTRS",
",",
"]",
",",
")",
"requested_qtr_data",
"=",
"stacked_last_per_qtr",
".",
"loc",
"[",
"requested_qtr_idx",
"]",
"requested_qtr_data",
"=",
"requested_qtr_data",
".",
"reset_index",
"(",
"SHIFTED_NORMALIZED_QTRS",
",",
")",
"# Calculate the actual year/quarter being requested and add those in",
"# as columns.",
"(",
"requested_qtr_data",
"[",
"FISCAL_YEAR_FIELD_NAME",
"]",
",",
"requested_qtr_data",
"[",
"FISCAL_QUARTER_FIELD_NAME",
"]",
")",
"=",
"split_normalized_quarters",
"(",
"requested_qtr_data",
"[",
"SHIFTED_NORMALIZED_QTRS",
"]",
")",
"# Once we're left with just dates as the index, we can reindex by all",
"# dates so that we have a value for each calendar date.",
"return",
"requested_qtr_data",
".",
"unstack",
"(",
"SID_FIELD_NAME",
")",
".",
"reindex",
"(",
"dates",
")"
] | Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
The number of announcements out that the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom. | [
"Selects",
"the",
"requested",
"data",
"for",
"each",
"date",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L190-L253 |
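The final unstack/reindex is the part most worth internalizing: (date, sid)-indexed rows become a date-indexed frame with sids as a column level, and reindexing over the full calendar leaves NaN where a sid has no value. A toy pandas sketch with made-up estimates:

import pandas as pd

dates = pd.date_range('2014-01-06', periods=4)
idx = pd.MultiIndex.from_product([dates[:3], [1, 2]],
                                 names=['date', 'sid'])
per_sid = pd.DataFrame(
    {'estimate': [1.0, 2.0, 1.1, 2.1, 1.2, 2.2]}, index=idx,
)

# sids move into the columns; the missing fourth date becomes NaN.
wide = per_sid.unstack('sid').reindex(dates)
print(wide)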
25,884 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.get_split_adjusted_asof_idx | def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
"""
split_adjusted_asof_idx = dates.searchsorted(
self._split_adjusted_asof
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
elif self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx | python | def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
"""
split_adjusted_asof_idx = dates.searchsorted(
self._split_adjusted_asof
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
elif self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx | [
"def",
"get_split_adjusted_asof_idx",
"(",
"self",
",",
"dates",
")",
":",
"split_adjusted_asof_idx",
"=",
"dates",
".",
"searchsorted",
"(",
"self",
".",
"_split_adjusted_asof",
")",
"# The split-asof date is after the date index.",
"if",
"split_adjusted_asof_idx",
"==",
"len",
"(",
"dates",
")",
":",
"split_adjusted_asof_idx",
"=",
"len",
"(",
"dates",
")",
"-",
"1",
"elif",
"self",
".",
"_split_adjusted_asof",
"<",
"dates",
"[",
"0",
"]",
".",
"tz_localize",
"(",
"None",
")",
":",
"split_adjusted_asof_idx",
"=",
"-",
"1",
"return",
"split_adjusted_asof_idx"
] | Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split. | [
"Compute",
"the",
"index",
"in",
"dates",
"where",
"the",
"split",
"-",
"adjusted",
"-",
"asof",
"-",
"date",
"falls",
".",
"This",
"is",
"the",
"date",
"up",
"to",
"which",
"and",
"including",
"which",
"we",
"will",
"need",
"to",
"unapply",
"all",
"adjustments",
"for",
"and",
"then",
"re",
"-",
"apply",
"them",
"as",
"they",
"come",
"in",
".",
"After",
"this",
"date",
"adjustments",
"are",
"applied",
"as",
"normal",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L255-L280 |
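The two branches cover searchsorted's edge values: len(dates) (an asof past the window) clamps to the last index, and an asof before the window maps to the -1 sentinel. A toy check with naive timestamps; the source compares against a tz-stripped dates[0] because its calendar index is tz-aware.

import pandas as pd

dates = pd.date_range('2015-01-05', periods=5)

for asof in (pd.Timestamp('2015-01-20'), pd.Timestamp('2014-12-01')):
    idx = dates.searchsorted(asof)
    if idx == len(dates):
        idx = len(dates) - 1  # past the window: clamp to the end
    elif asof < dates[0]:
        idx = -1              # before the window: sentinel value
    print(asof.date(), '->', idx)  # 2015-01-20 -> 4, 2014-12-01 -> -1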
25,885 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.collect_overwrites_for_sid | def collect_overwrites_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
"""
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
group[EVENT_DATE_FIELD_NAME].values,
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(
NORMALIZED_QUARTERS
).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
# Find the quarter being requested in the quarter we're
# crossing into.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid,
].iloc[idx]
# Only add adjustments if the next quarter starts somewhere
# in our date index for this sid. Our 'next' quarter can
# never start at index 0; a starting index of 0 means that
# the next quarter's event date was NaT.
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns
) | python | def collect_overwrites_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
"""
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
group[EVENT_DATE_FIELD_NAME].values,
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(
NORMALIZED_QUARTERS
).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
# Find the quarter being requested in the quarter we're
# crossing into.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid,
].iloc[idx]
# Only add adjustments if the next quarter starts somewhere
# in our date index for this sid. Our 'next' quarter can
# never start at index 0; a starting index of 0 means that
# the next quarter's event date was NaT.
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns
) | [
"def",
"collect_overwrites_for_sid",
"(",
"self",
",",
"group",
",",
"dates",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"sid_idx",
",",
"columns",
",",
"all_adjustments_for_sid",
",",
"sid",
")",
":",
"# If data was requested for only 1 date, there can never be any",
"# overwrites, so skip the extra work.",
"if",
"len",
"(",
"dates",
")",
"==",
"1",
":",
"return",
"next_qtr_start_indices",
"=",
"dates",
".",
"searchsorted",
"(",
"group",
"[",
"EVENT_DATE_FIELD_NAME",
"]",
".",
"values",
",",
"side",
"=",
"self",
".",
"searchsorted_side",
",",
")",
"qtrs_with_estimates",
"=",
"group",
".",
"index",
".",
"get_level_values",
"(",
"NORMALIZED_QUARTERS",
")",
".",
"values",
"for",
"idx",
"in",
"next_qtr_start_indices",
":",
"if",
"0",
"<",
"idx",
"<",
"len",
"(",
"dates",
")",
":",
"# Find the quarter being requested in the quarter we're",
"# crossing into.",
"requested_quarter",
"=",
"requested_qtr_data",
"[",
"SHIFTED_NORMALIZED_QTRS",
",",
"sid",
",",
"]",
".",
"iloc",
"[",
"idx",
"]",
"# Only add adjustments if the next quarter starts somewhere",
"# in our date index for this sid. Our 'next' quarter can",
"# never start at index 0; a starting index of 0 means that",
"# the next quarter's event date was NaT.",
"self",
".",
"create_overwrites_for_quarter",
"(",
"all_adjustments_for_sid",
",",
"idx",
",",
"last_per_qtr",
",",
"qtrs_with_estimates",
",",
"requested_quarter",
",",
"sid",
",",
"sid_idx",
",",
"columns",
")"
] | Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed. | [
"Given",
"a",
"sid",
"collect",
"all",
"overwrites",
"that",
"should",
"be",
"applied",
"for",
"this",
"sid",
"at",
"each",
"quarter",
"boundary",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L282-L353 |
25,886 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.merge_into_adjustments_for_all_sids | def merge_into_adjustments_for_all_sids(self,
all_adjustments_for_sid,
col_to_all_adjustments):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids.
"""
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments,
adjs,
col_name,
ts) | python | def merge_into_adjustments_for_all_sids(self,
all_adjustments_for_sid,
col_to_all_adjustments):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids.
"""
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments,
adjs,
col_name,
ts) | [
"def",
"merge_into_adjustments_for_all_sids",
"(",
"self",
",",
"all_adjustments_for_sid",
",",
"col_to_all_adjustments",
")",
":",
"for",
"col_name",
"in",
"all_adjustments_for_sid",
":",
"if",
"col_name",
"not",
"in",
"col_to_all_adjustments",
":",
"col_to_all_adjustments",
"[",
"col_name",
"]",
"=",
"{",
"}",
"for",
"ts",
"in",
"all_adjustments_for_sid",
"[",
"col_name",
"]",
":",
"adjs",
"=",
"all_adjustments_for_sid",
"[",
"col_name",
"]",
"[",
"ts",
"]",
"add_new_adjustments",
"(",
"col_to_all_adjustments",
",",
"adjs",
",",
"col_name",
",",
"ts",
")"
] | Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids. | [
"Merge",
"adjustments",
"for",
"a",
"particular",
"sid",
"into",
"a",
"dictionary",
"containing",
"adjustments",
"for",
"all",
"sids",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L406-L429 |
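The shape of the merge, sketched with plain lists standing in for Adjustment objects; extending an existing list when the (column, timestamp) slot is already occupied is roughly what add_new_adjustments does.

per_sid = {'estimate': {3: ['adj_a'], 7: ['adj_b']}}   # one sid's overwrites
all_adjs = {'estimate': {3: ['adj_other_sid']}}        # running global map

for col, by_ts in per_sid.items():
    slot = all_adjs.setdefault(col, {})
    for ts, adjs in by_ts.items():
        slot.setdefault(ts, []).extend(adjs)

# {'estimate': {3: ['adj_other_sid', 'adj_a'], 7: ['adj_b']}}
print(all_adjs)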
25,887 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.get_adjustments | def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied.
"""
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs
)
return col_to_all_adjustments | python | def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied.
"""
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs
)
return col_to_all_adjustments | [
"def",
"get_adjustments",
"(",
"self",
",",
"zero_qtr_data",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"dates",
",",
"assets",
",",
"columns",
",",
"*",
"*",
"kwargs",
")",
":",
"zero_qtr_data",
".",
"sort_index",
"(",
"inplace",
"=",
"True",
")",
"# Here we want to get the LAST record from each group of records",
"# corresponding to a single quarter. This is to ensure that we select",
"# the most up-to-date event date in case the event date changes.",
"quarter_shifts",
"=",
"zero_qtr_data",
".",
"groupby",
"(",
"level",
"=",
"[",
"SID_FIELD_NAME",
",",
"NORMALIZED_QUARTERS",
"]",
")",
".",
"nth",
"(",
"-",
"1",
")",
"col_to_all_adjustments",
"=",
"{",
"}",
"sid_to_idx",
"=",
"dict",
"(",
"zip",
"(",
"assets",
",",
"range",
"(",
"len",
"(",
"assets",
")",
")",
")",
")",
"quarter_shifts",
".",
"groupby",
"(",
"level",
"=",
"SID_FIELD_NAME",
")",
".",
"apply",
"(",
"self",
".",
"get_adjustments_for_sid",
",",
"dates",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"sid_to_idx",
",",
"columns",
",",
"col_to_all_adjustments",
",",
"*",
"*",
"kwargs",
")",
"return",
"col_to_all_adjustments"
] | Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied. | [
"Creates",
"an",
"AdjustedArray",
"from",
"the",
"given",
"estimates",
"data",
"for",
"the",
"given",
"dates",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L431-L490 |
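The dispatch pattern in `get_adjustments` — group the quarter-shift frame by sid and let a per-sid collector mutate a shared output dict instead of returning a value — can be reproduced with plain pandas. A hedged sketch on made-up data; only the groupby/apply shape mirrors the code above:

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(1, 2020.0), (1, 2020.25), (2, 2020.0)], names=["sid", "quarter"]
)
quarter_shifts = pd.DataFrame(
    {"event_date": pd.to_datetime(["2020-01-10", "2020-04-12", "2020-01-20"])},
    index=idx,
)
col_to_all_adjustments = {}

def collect_for_sid(group, out):
    # group.name is the sid, just as in `get_adjustments_for_sid`.
    out[int(group.name)] = group["event_date"].iloc[-1]

quarter_shifts.groupby(level="sid").apply(collect_for_sid, col_to_all_adjustments)
print(col_to_all_adjustments)  # {1: 2020-04-12, 2: 2020-01-20}
```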
25,888 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.create_overwrites_for_quarter | def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx) | python | def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx) | [
"def",
"create_overwrites_for_quarter",
"(",
"self",
",",
"col_to_overwrites",
",",
"next_qtr_start_idx",
",",
"last_per_qtr",
",",
"quarters_with_estimates_for_sid",
",",
"requested_quarter",
",",
"sid",
",",
"sid_idx",
",",
"columns",
")",
":",
"for",
"col",
"in",
"columns",
":",
"column_name",
"=",
"self",
".",
"name_map",
"[",
"col",
".",
"name",
"]",
"if",
"column_name",
"not",
"in",
"col_to_overwrites",
":",
"col_to_overwrites",
"[",
"column_name",
"]",
"=",
"{",
"}",
"# If there are estimates for the requested quarter,",
"# overwrite all values going up to the starting index of",
"# that quarter with estimates for that quarter.",
"if",
"requested_quarter",
"in",
"quarters_with_estimates_for_sid",
":",
"adjs",
"=",
"self",
".",
"create_overwrite_for_estimate",
"(",
"col",
",",
"column_name",
",",
"last_per_qtr",
",",
"next_qtr_start_idx",
",",
"requested_quarter",
",",
"sid",
",",
"sid_idx",
",",
")",
"add_new_adjustments",
"(",
"col_to_overwrites",
",",
"adjs",
",",
"column_name",
",",
"next_qtr_start_idx",
")",
"# There are no estimates for the quarter. Overwrite all",
"# values going up to the starting index of that quarter",
"# with the missing value for this column.",
"else",
":",
"adjs",
"=",
"[",
"self",
".",
"overwrite_with_null",
"(",
"col",
",",
"next_qtr_start_idx",
",",
"sid_idx",
")",
"]",
"add_new_adjustments",
"(",
"col_to_overwrites",
",",
"adjs",
",",
"column_name",
",",
"next_qtr_start_idx",
")"
] | Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites. | [
"Add",
"entries",
"to",
"the",
"dictionary",
"of",
"columns",
"to",
"adjustments",
"for",
"the",
"given",
"sid",
"and",
"the",
"given",
"quarter",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L492-L563 |
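The branch documented above — overwrite with estimate values when the quarter has estimates, otherwise with the column's missing value — can be shown with a stand-in adjustment class. A sketch; `Overwrite` here only imitates the shape of zipline's adjustment objects and is not the real API:

```python
import numpy as np

class Overwrite:
    # Stand-in adjustment covering rows [0, last_row] of one sid column.
    def __init__(self, last_row, sid_idx, value):
        self.last_row, self.sid_idx, self.value = last_row, sid_idx, value

def overwrites_for_quarter(requested_quarter, quarters_with_estimates,
                           estimate_value, next_qtr_start_idx, sid_idx):
    if requested_quarter in quarters_with_estimates:
        value = estimate_value          # a real estimate exists
    else:
        value = np.nan                  # missing value for a float column
    # Overwrite everything strictly before the next quarter's start.
    return [Overwrite(next_qtr_start_idx - 1, sid_idx, value)]

adjs = overwrites_for_quarter(2020.25, {2020.0}, 3.5,
                              next_qtr_start_idx=10, sid_idx=0)
assert np.isnan(adjs[0].value)  # no estimates for 2020.25 -> null overwrite
```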
25,889 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | EarningsEstimatesLoader.get_last_data_per_qtr | def get_last_data_per_qtr(self,
assets_with_data,
columns,
dates,
data_query_cutoff_times):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
data_query_cutoff_times : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
Returns
-------
stacked_last_per_qtr : pd.DataFrame
A DataFrame indexed by [dates, sid, normalized_quarters] that has
the latest information for each row of the index, sorted by event
date.
last_per_qtr : pd.DataFrame
A DataFrame with columns that are a MultiIndex of [
self.estimates.columns, normalized_quarters, sid].
"""
# Get a DataFrame indexed by date with a MultiIndex of columns of
# [self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
data_query_cutoff_times,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
last_per_qtr.index = dates
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(
EVENT_DATE_FIELD_NAME,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
)
return last_per_qtr, stacked_last_per_qtr | python | def get_last_data_per_qtr(self,
assets_with_data,
columns,
dates,
data_query_cutoff_times):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
data_query_cutoff_times : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
Returns
-------
stacked_last_per_qtr : pd.DataFrame
A DataFrame indexed by [dates, sid, normalized_quarters] that has
the latest information for each row of the index, sorted by event
date.
last_per_qtr : pd.DataFrame
A DataFrame with columns that are a MultiIndex of [
self.estimates.columns, normalized_quarters, sid].
"""
# Get a DataFrame indexed by date with a MultiIndex of columns of
# [self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
data_query_cutoff_times,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
last_per_qtr.index = dates
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(
EVENT_DATE_FIELD_NAME,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
)
return last_per_qtr, stacked_last_per_qtr | [
"def",
"get_last_data_per_qtr",
"(",
"self",
",",
"assets_with_data",
",",
"columns",
",",
"dates",
",",
"data_query_cutoff_times",
")",
":",
"# Get a DataFrame indexed by date with a MultiIndex of columns of",
"# [self.estimates.columns, normalized_quarters, sid], where each cell",
"# contains the latest data for that day.",
"last_per_qtr",
"=",
"last_in_date_group",
"(",
"self",
".",
"estimates",
",",
"data_query_cutoff_times",
",",
"assets_with_data",
",",
"reindex",
"=",
"True",
",",
"extra_groupers",
"=",
"[",
"NORMALIZED_QUARTERS",
"]",
",",
")",
"last_per_qtr",
".",
"index",
"=",
"dates",
"# Forward fill values for each quarter/sid/dataset column.",
"ffill_across_cols",
"(",
"last_per_qtr",
",",
"columns",
",",
"self",
".",
"name_map",
")",
"# Stack quarter and sid into the index.",
"stacked_last_per_qtr",
"=",
"last_per_qtr",
".",
"stack",
"(",
"[",
"SID_FIELD_NAME",
",",
"NORMALIZED_QUARTERS",
"]",
",",
")",
"# Set date index name for ease of reference",
"stacked_last_per_qtr",
".",
"index",
".",
"set_names",
"(",
"SIMULATION_DATES",
",",
"level",
"=",
"0",
",",
"inplace",
"=",
"True",
",",
")",
"stacked_last_per_qtr",
"=",
"stacked_last_per_qtr",
".",
"sort_values",
"(",
"EVENT_DATE_FIELD_NAME",
",",
")",
"stacked_last_per_qtr",
"[",
"EVENT_DATE_FIELD_NAME",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"stacked_last_per_qtr",
"[",
"EVENT_DATE_FIELD_NAME",
"]",
")",
"return",
"last_per_qtr",
",",
"stacked_last_per_qtr"
] | Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
data_query_cutoff_times : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
Returns
-------
stacked_last_per_qtr : pd.DataFrame
A DataFrame indexed by [dates, sid, normalized_quarters] that has
the latest information for each row of the index, sorted by event
date.
last_per_qtr : pd.DataFrame
A DataFrame with columns that are a MultiIndex of [
self.estimates.columns, normalized_quarters, sid]. | [
"Determine",
"the",
"last",
"piece",
"of",
"information",
"we",
"know",
"for",
"each",
"column",
"on",
"each",
"date",
"in",
"the",
"index",
"for",
"each",
"sid",
"and",
"quarter",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L667-L725 |
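The pipeline described in `get_last_data_per_qtr` — keep the freshest record per date, forward-fill, then stack quarter and sid back into the row index — looks roughly like this in plain pandas. Illustrative data, with `groupby(...).last()` standing in for zipline's `last_in_date_group` helper:

```python
import pandas as pd

raw = pd.DataFrame({
    "date": pd.to_datetime(["2020-01-02", "2020-01-02", "2020-01-03"]),
    "quarter": [2020.0, 2020.0, 2020.0],
    "sid": [1, 1, 1],
    "estimate": [1.0, 1.5, 2.0],
})
# Last record per (date, quarter, sid), as columns of a date-indexed frame.
last_per_qtr = (raw.groupby(["date", "quarter", "sid"])["estimate"]
                   .last()
                   .unstack(["quarter", "sid"]))
# Reindex onto the full calendar and forward-fill stale values.
calendar = pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06"])
last_per_qtr = last_per_qtr.reindex(calendar).ffill()
# Stack quarter and sid back into the row index, one row per datum.
stacked = last_per_qtr.stack(["quarter", "sid"])
print(stacked.loc["2020-01-06"])  # 2.0 carried forward from 2020-01-03
```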
25,890 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | PreviousEarningsEstimatesLoader.get_zeroth_quarter_idx | def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or before each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
"""
previous_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(-1)
return previous_releases_per_date.index | python | def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or before each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
"""
previous_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(-1)
return previous_releases_per_date.index | [
"def",
"get_zeroth_quarter_idx",
"(",
"self",
",",
"stacked_last_per_qtr",
")",
":",
"previous_releases_per_date",
"=",
"stacked_last_per_qtr",
".",
"loc",
"[",
"stacked_last_per_qtr",
"[",
"EVENT_DATE_FIELD_NAME",
"]",
"<=",
"stacked_last_per_qtr",
".",
"index",
".",
"get_level_values",
"(",
"SIMULATION_DATES",
")",
"]",
".",
"groupby",
"(",
"level",
"=",
"[",
"SIMULATION_DATES",
",",
"SID_FIELD_NAME",
"]",
",",
"as_index",
"=",
"False",
",",
"# Here we take advantage of the fact that `stacked_last_per_qtr` is",
"# sorted by event date.",
")",
".",
"nth",
"(",
"-",
"1",
")",
"return",
"previous_releases_per_date",
".",
"index"
] | Filters for releases that are on or before each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event. | [
"Filters",
"for",
"releases",
"that",
"are",
"on",
"or",
"after",
"each",
"simulation",
"date",
"and",
"determines",
"the",
"previous",
"quarter",
"by",
"picking",
"out",
"the",
"most",
"recent",
"release",
"relative",
"to",
"each",
"date",
"in",
"the",
"index",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L810-L838 |
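The selection above reduces to a filter plus a last-row-per-group pick. A small sketch with illustrative index names, mirroring the `nth(-1)` trick that relies on the frame being pre-sorted by event date:

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(pd.Timestamp("2020-01-10"), 1, 2019.75),
     (pd.Timestamp("2020-01-10"), 1, 2020.0)],
    names=["dates", "sid", "quarter"],
)
stacked = pd.DataFrame(
    {"event_date": pd.to_datetime(["2020-01-05", "2020-02-01"])}, index=idx
).sort_values("event_date")

# Keep releases that have already happened, then pick the most recent
# one per (date, sid); sorting by event date makes nth(-1) the newest.
previous = stacked[
    stacked["event_date"] <= stacked.index.get_level_values("dates")
].groupby(level=["dates", "sid"]).nth(-1)
print(previous.index)  # only the 2019.75 quarter qualifies on 2020-01-10
```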
25,891 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | SplitAdjustedEstimatesLoader.get_adjustments_for_sid | def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
"""
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
(pre_adjustments,
post_adjustments) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[
self.estimates[SID_FIELD_NAME] == sid
]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
) | python | def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
"""
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
(pre_adjustments,
post_adjustments) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[
self.estimates[SID_FIELD_NAME] == sid
]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
) | [
"def",
"get_adjustments_for_sid",
"(",
"self",
",",
"group",
",",
"dates",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"sid_to_idx",
",",
"columns",
",",
"col_to_all_adjustments",
",",
"split_adjusted_asof_idx",
"=",
"None",
",",
"split_adjusted_cols_for_group",
"=",
"None",
")",
":",
"all_adjustments_for_sid",
"=",
"{",
"}",
"sid",
"=",
"int",
"(",
"group",
".",
"name",
")",
"self",
".",
"collect_overwrites_for_sid",
"(",
"group",
",",
"dates",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"sid_to_idx",
"[",
"sid",
"]",
",",
"columns",
",",
"all_adjustments_for_sid",
",",
"sid",
")",
"(",
"pre_adjustments",
",",
"post_adjustments",
")",
"=",
"self",
".",
"retrieve_split_adjustment_data_for_sid",
"(",
"dates",
",",
"sid",
",",
"split_adjusted_asof_idx",
")",
"sid_estimates",
"=",
"self",
".",
"estimates",
"[",
"self",
".",
"estimates",
"[",
"SID_FIELD_NAME",
"]",
"==",
"sid",
"]",
"# We might not have any overwrites but still have",
"# adjustments, and we will need to manually add columns if",
"# that is the case.",
"for",
"col_name",
"in",
"split_adjusted_cols_for_group",
":",
"if",
"col_name",
"not",
"in",
"all_adjustments_for_sid",
":",
"all_adjustments_for_sid",
"[",
"col_name",
"]",
"=",
"{",
"}",
"self",
".",
"collect_split_adjustments",
"(",
"all_adjustments_for_sid",
",",
"requested_qtr_data",
",",
"dates",
",",
"sid",
",",
"sid_to_idx",
"[",
"sid",
"]",
",",
"sid_estimates",
",",
"split_adjusted_asof_idx",
",",
"pre_adjustments",
",",
"post_adjustments",
",",
"split_adjusted_cols_for_group",
")",
"self",
".",
"merge_into_adjustments_for_all_sids",
"(",
"all_adjustments_for_sid",
",",
"col_to_all_adjustments",
")"
] | Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted. | [
"Collects",
"both",
"overwrites",
"and",
"adjustments",
"for",
"a",
"particular",
"sid",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L907-L965 |
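One detail worth isolating from the collector above: columns that produced no overwrites still need an entry before split adjustments can be merged in. A trivial sketch of that guard, with illustrative column names:

```python
all_adjustments_for_sid = {"estimate": {5: ["an overwrite"]}}
split_adjusted_cols_for_group = ["estimate", "fiscal_quarter"]

# Ensure every split-adjusted column has a slot, even without overwrites.
for col_name in split_adjusted_cols_for_group:
    if col_name not in all_adjustments_for_sid:
        all_adjustments_for_sid[col_name] = {}

assert all_adjustments_for_sid["fiscal_quarter"] == {}
```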
25,892 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | SplitAdjustedEstimatesLoader.get_adjustments | def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Calculates both split adjustments and overwrites for all sids.
"""
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
# Add all splits to the adjustment dict for this sid.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(
dates
)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx
) | python | def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Calculates both split adjustments and overwrites for all sids.
"""
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
# Add all splits to the adjustment dict for this sid.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(
dates
)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx
) | [
"def",
"get_adjustments",
"(",
"self",
",",
"zero_qtr_data",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"dates",
",",
"assets",
",",
"columns",
",",
"*",
"*",
"kwargs",
")",
":",
"split_adjusted_cols_for_group",
"=",
"[",
"self",
".",
"name_map",
"[",
"col",
".",
"name",
"]",
"for",
"col",
"in",
"columns",
"if",
"self",
".",
"name_map",
"[",
"col",
".",
"name",
"]",
"in",
"self",
".",
"_split_adjusted_column_names",
"]",
"# Add all splits to the adjustment dict for this sid.",
"split_adjusted_asof_idx",
"=",
"self",
".",
"get_split_adjusted_asof_idx",
"(",
"dates",
")",
"return",
"super",
"(",
"SplitAdjustedEstimatesLoader",
",",
"self",
")",
".",
"get_adjustments",
"(",
"zero_qtr_data",
",",
"requested_qtr_data",
",",
"last_per_qtr",
",",
"dates",
",",
"assets",
",",
"columns",
",",
"split_adjusted_cols_for_group",
"=",
"split_adjusted_cols_for_group",
",",
"split_adjusted_asof_idx",
"=",
"split_adjusted_asof_idx",
")"
] | Calculates both split adjustments and overwrites for all sids. | [
"Calculates",
"both",
"split",
"adjustments",
"and",
"overwrites",
"for",
"all",
"sids",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L967-L996 |
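The subclass's only extra work before delegating to the base implementation is selecting which of the requested columns are split-adjusted. A sketch with stand-in column objects; `BoundColumn` here is a minimal placeholder, not zipline's class:

```python
class BoundColumn:
    # Minimal stand-in for a pipeline column; only `.name` is needed here.
    def __init__(self, name):
        self.name = name

name_map = {"estimate": "estimate", "fiscal_year": "fiscal_year"}
split_adjusted_column_names = {"estimate"}
requested_columns = [BoundColumn("estimate"), BoundColumn("fiscal_year")]

split_adjusted_cols_for_group = [
    name_map[col.name]
    for col in requested_columns
    if name_map[col.name] in split_adjusted_column_names
]
assert split_adjusted_cols_for_group == ["estimate"]
```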
25,893 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | SplitAdjustedEstimatesLoader.determine_end_idx_for_adjustment | def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx | python | def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx | [
"def",
"determine_end_idx_for_adjustment",
"(",
"self",
",",
"adjustment_ts",
",",
"dates",
",",
"upper_bound",
",",
"requested_quarter",
",",
"sid_estimates",
")",
":",
"end_idx",
"=",
"upper_bound",
"# Find the next newest kd that happens on or after",
"# the date of this adjustment",
"newest_kd_for_qtr",
"=",
"sid_estimates",
"[",
"(",
"sid_estimates",
"[",
"NORMALIZED_QUARTERS",
"]",
"==",
"requested_quarter",
")",
"&",
"(",
"sid_estimates",
"[",
"TS_FIELD_NAME",
"]",
">=",
"adjustment_ts",
")",
"]",
"[",
"TS_FIELD_NAME",
"]",
".",
"min",
"(",
")",
"if",
"pd",
".",
"notnull",
"(",
"newest_kd_for_qtr",
")",
":",
"newest_kd_idx",
"=",
"dates",
".",
"searchsorted",
"(",
"newest_kd_for_qtr",
")",
"# We have fresh information that comes in",
"# before the end of the overwrite and",
"# presumably is already split-adjusted to the",
"# current split. We should stop applying the",
"# adjustment the day before this new",
"# information comes in.",
"if",
"newest_kd_idx",
"<=",
"upper_bound",
":",
"end_idx",
"=",
"newest_kd_idx",
"-",
"1",
"return",
"end_idx"
] | Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid. | [
"Determines",
"the",
"date",
"until",
"which",
"the",
"adjustment",
"at",
"the",
"given",
"date",
"index",
"should",
"be",
"applied",
"for",
"the",
"given",
"quarter",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L998-L1051 |
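The index arithmetic above is easy to check in isolation. A hedged sketch with made-up knowledge dates, using `searchsorted` the same way the method does:

```python
import pandas as pd

dates = pd.date_range("2020-01-01", periods=10)   # the pipeline calendar
upper_bound = 9                                   # end of the quarter range
adjustment_ts = pd.Timestamp("2020-01-04")
knowledge_dates = pd.Series(pd.to_datetime(["2020-01-02", "2020-01-07"]))

# First knowledge date for the quarter on/after the adjustment.
newest_kd = knowledge_dates[knowledge_dates >= adjustment_ts].min()
end_idx = upper_bound
if pd.notnull(newest_kd):
    kd_idx = dates.searchsorted(newest_kd)
    if kd_idx <= upper_bound:
        end_idx = kd_idx - 1   # stop the day before fresh data arrives
print(end_idx)  # 5, i.e. 2020-01-06
```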
25,894 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | SplitAdjustedEstimatesLoader.collect_pre_split_asof_date_adjustments | def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and the indexes in `dates` for
adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments | python | def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and the indexes in `dates` for
adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments | [
"def",
"collect_pre_split_asof_date_adjustments",
"(",
"self",
",",
"split_adjusted_asof_date_idx",
",",
"sid_idx",
",",
"pre_adjustments",
",",
"requested_split_adjusted_columns",
")",
":",
"col_to_split_adjustments",
"=",
"{",
"}",
"if",
"len",
"(",
"pre_adjustments",
"[",
"0",
"]",
")",
":",
"adjustment_values",
",",
"date_indexes",
"=",
"pre_adjustments",
"for",
"column_name",
"in",
"requested_split_adjusted_columns",
":",
"col_to_split_adjustments",
"[",
"column_name",
"]",
"=",
"{",
"}",
"# We need to undo all adjustments that happen before the",
"# split_asof_date here by reversing the split ratio.",
"col_to_split_adjustments",
"[",
"column_name",
"]",
"[",
"0",
"]",
"=",
"[",
"Float64Multiply",
"(",
"0",
",",
"split_adjusted_asof_date_idx",
",",
"sid_idx",
",",
"sid_idx",
",",
"1",
"/",
"future_adjustment",
")",
"for",
"future_adjustment",
"in",
"adjustment_values",
"]",
"for",
"adjustment",
",",
"date_index",
"in",
"zip",
"(",
"adjustment_values",
",",
"date_indexes",
")",
":",
"adj",
"=",
"Float64Multiply",
"(",
"0",
",",
"split_adjusted_asof_date_idx",
",",
"sid_idx",
",",
"sid_idx",
",",
"adjustment",
")",
"add_new_adjustments",
"(",
"col_to_split_adjustments",
",",
"[",
"adj",
"]",
",",
"column_name",
",",
"date_index",
")",
"return",
"col_to_split_adjustments"
] | Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and the indexes in `dates` for
adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date. | [
"Collect",
"split",
"adjustments",
"that",
"occur",
"before",
"the",
"split",
"-",
"adjusted",
"-",
"asof",
"-",
"date",
".",
"All",
"those",
"adjustments",
"must",
"first",
"be",
"UN",
"-",
"applied",
"at",
"the",
"first",
"date",
"index",
"and",
"then",
"re",
"-",
"applied",
"on",
"the",
"appropriate",
"dates",
"in",
"order",
"to",
"match",
"point",
"in",
"time",
"share",
"pricing",
"data",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1053-L1115 |
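The un-apply/re-apply trick is easiest to see numerically. A sketch assuming two hypothetical splits before the asof date, on a series that already carries both of them:

```python
import numpy as np

ratios = [2.0, 1.5]            # splits that occurred before the asof date
series = np.full(10, 12.0)     # values already adjusted for both splits

# Undo every pre-asof split at the window start (index 0)...
unadjusted_start = series[0]
for r in ratios:
    unadjusted_start *= 1.0 / r
print(unadjusted_start)        # 4.0 == 12.0 / (2.0 * 1.5)
# ...then each ratio is re-applied at its own date index as the
# timeline walks forward, matching point-in-time share prices.
```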
25,895 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | SplitAdjustedEstimatesLoader.collect_post_asof_split_adjustments | def collect_post_asof_split_adjustments(self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
"""
col_to_split_adjustments = {}
if post_adjustments:
# Get an integer index
requested_qtr_timeline = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS
][sid].reset_index()
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
# Split the data into ranges by quarter and determine which quarter
# was being requested in each range.
# Split integer indexes up by quarter range
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1
)
requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]]
for r in qtr_ranges_idxs]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(
*post_adjustments
):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(
col_to_split_adjustments,
[adj],
column_name,
start_idx
)
return col_to_split_adjustments | python | def collect_post_asof_split_adjustments(self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
"""
col_to_split_adjustments = {}
if post_adjustments:
# Get an integer index
requested_qtr_timeline = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS
][sid].reset_index()
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
# Split the data into ranges by quarter and determine which quarter
# was being requested in each range.
# Split integer indexes up by quarter range
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1
)
requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]]
for r in qtr_ranges_idxs]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(
*post_adjustments
):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(
col_to_split_adjustments,
[adj],
column_name,
start_idx
)
return col_to_split_adjustments | [
"def",
"collect_post_asof_split_adjustments",
"(",
"self",
",",
"post_adjustments",
",",
"requested_qtr_data",
",",
"sid",
",",
"sid_idx",
",",
"sid_estimates",
",",
"requested_split_adjusted_columns",
")",
":",
"col_to_split_adjustments",
"=",
"{",
"}",
"if",
"post_adjustments",
":",
"# Get an integer index",
"requested_qtr_timeline",
"=",
"requested_qtr_data",
"[",
"SHIFTED_NORMALIZED_QTRS",
"]",
"[",
"sid",
"]",
".",
"reset_index",
"(",
")",
"requested_qtr_timeline",
"=",
"requested_qtr_timeline",
"[",
"requested_qtr_timeline",
"[",
"sid",
"]",
".",
"notnull",
"(",
")",
"]",
"# Split the data into range by quarter and determine which quarter",
"# was being requested in each range.",
"# Split integer indexes up by quarter range",
"qtr_ranges_idxs",
"=",
"np",
".",
"split",
"(",
"requested_qtr_timeline",
".",
"index",
",",
"np",
".",
"where",
"(",
"np",
".",
"diff",
"(",
"requested_qtr_timeline",
"[",
"sid",
"]",
")",
"!=",
"0",
")",
"[",
"0",
"]",
"+",
"1",
")",
"requested_quarters_per_range",
"=",
"[",
"requested_qtr_timeline",
"[",
"sid",
"]",
"[",
"r",
"[",
"0",
"]",
"]",
"for",
"r",
"in",
"qtr_ranges_idxs",
"]",
"# Try to apply each adjustment to each quarter range.",
"for",
"i",
",",
"qtr_range",
"in",
"enumerate",
"(",
"qtr_ranges_idxs",
")",
":",
"for",
"adjustment",
",",
"date_index",
",",
"timestamp",
"in",
"zip",
"(",
"*",
"post_adjustments",
")",
":",
"# In the default case, apply through the end of the quarter",
"upper_bound",
"=",
"qtr_range",
"[",
"-",
"1",
"]",
"# Find the smallest KD in estimates that is on or after the",
"# date of the given adjustment. Apply the given adjustment",
"# until that KD.",
"end_idx",
"=",
"self",
".",
"determine_end_idx_for_adjustment",
"(",
"timestamp",
",",
"requested_qtr_data",
".",
"index",
",",
"upper_bound",
",",
"requested_quarters_per_range",
"[",
"i",
"]",
",",
"sid_estimates",
")",
"# In the default case, apply adjustment on the first day of",
"# the quarter.",
"start_idx",
"=",
"qtr_range",
"[",
"0",
"]",
"# If the adjustment happens during this quarter, apply the",
"# adjustment on the day it happens.",
"if",
"date_index",
">",
"start_idx",
":",
"start_idx",
"=",
"date_index",
"# We only want to apply the adjustment if we have any stale",
"# data to apply it to.",
"if",
"qtr_range",
"[",
"0",
"]",
"<=",
"end_idx",
":",
"for",
"column_name",
"in",
"requested_split_adjusted_columns",
":",
"if",
"column_name",
"not",
"in",
"col_to_split_adjustments",
":",
"col_to_split_adjustments",
"[",
"column_name",
"]",
"=",
"{",
"}",
"adj",
"=",
"Float64Multiply",
"(",
"# Always apply from first day of qtr",
"qtr_range",
"[",
"0",
"]",
",",
"end_idx",
",",
"sid_idx",
",",
"sid_idx",
",",
"adjustment",
")",
"add_new_adjustments",
"(",
"col_to_split_adjustments",
",",
"[",
"adj",
"]",
",",
"column_name",
",",
"start_idx",
")",
"return",
"col_to_split_adjustments"
] | Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date. | [
"Collect",
"split",
"adjustments",
"that",
"occur",
"after",
"the",
"split",
"-",
"adjusted",
"-",
"asof",
"-",
"date",
".",
"Each",
"adjustment",
"needs",
"to",
"be",
"applied",
"to",
"all",
"dates",
"on",
"which",
"knowledge",
"for",
"the",
"requested",
"quarter",
"was",
"older",
"than",
"the",
"date",
"of",
"the",
"adjustment",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1117-L1215 |
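The segmentation at the heart of this method — `np.diff`/`np.split` over the requested-quarter timeline — is worth seeing on toy data. A sketch with illustrative values; `np.arange` stands in for the frame's integer index:

```python
import numpy as np
import pandas as pd

requested = pd.Series([2020.0, 2020.0, 2020.25, 2020.25, 2020.5])
requested = requested[requested.notnull()].reset_index(drop=True)

# Break the integer timeline wherever the requested quarter changes.
qtr_ranges = np.split(
    np.arange(len(requested)),
    np.where(np.diff(requested) != 0)[0] + 1,
)
quarters = [requested[r[0]] for r in qtr_ranges]
print([list(r) for r in qtr_ranges])  # [[0, 1], [2, 3], [4]]
print(quarters)                       # [2020.0, 2020.25, 2020.5]
```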
25,896 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | SplitAdjustedEstimatesLoader.merge_split_adjustments_with_overwrites | def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
) | python | def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
) | [
"def",
"merge_split_adjustments_with_overwrites",
"(",
"self",
",",
"pre",
",",
"post",
",",
"overwrites",
",",
"requested_split_adjusted_columns",
")",
":",
"for",
"column_name",
"in",
"requested_split_adjusted_columns",
":",
"# We can do a merge here because the timestamps in 'pre' and",
"# 'post' are guaranteed to not overlap.",
"if",
"pre",
":",
"# Either empty or contains all columns.",
"for",
"ts",
"in",
"pre",
"[",
"column_name",
"]",
":",
"add_new_adjustments",
"(",
"overwrites",
",",
"pre",
"[",
"column_name",
"]",
"[",
"ts",
"]",
",",
"column_name",
",",
"ts",
")",
"if",
"post",
":",
"# Either empty or contains all columns.",
"for",
"ts",
"in",
"post",
"[",
"column_name",
"]",
":",
"add_new_adjustments",
"(",
"overwrites",
",",
"post",
"[",
"column_name",
"]",
"[",
"ts",
"]",
",",
"column_name",
",",
"ts",
")"
] | Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested. | [
"Merge",
"split",
"adjustments",
"with",
"the",
"dict",
"containing",
"overwrites",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1294-L1336 |
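A compact sketch of the merge contract above: because the timestamps in the pre- and post-asof dicts never overlap, each source can simply extend the overwrites dict key by key. Names are illustrative:

```python
def merge(pre, post, overwrites, columns):
    for column_name in columns:
        for source in (pre, post):
            if source:  # either empty or contains every requested column
                for ts, adjs in source[column_name].items():
                    overwrites.setdefault(column_name, {}) \
                              .setdefault(ts, []).extend(adjs)

overwrites = {"estimate": {4: ["overwrite"]}}
merge({"estimate": {0: ["undo"], 2: ["re-apply"]}},
      {"estimate": {6: ["post-split"]}},
      overwrites, ["estimate"])
print(sorted(overwrites["estimate"]))  # [0, 2, 4, 6]
```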
25,897 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | PreviousSplitAdjustedEarningsEstimatesLoader.collect_split_adjustments | def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) | python | def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) | [
"def",
"collect_split_adjustments",
"(",
"self",
",",
"adjustments_for_sid",
",",
"requested_qtr_data",
",",
"dates",
",",
"sid",
",",
"sid_idx",
",",
"sid_estimates",
",",
"split_adjusted_asof_idx",
",",
"pre_adjustments",
",",
"post_adjustments",
",",
"requested_split_adjusted_columns",
")",
":",
"(",
"pre_adjustments_dict",
",",
"post_adjustments_dict",
")",
"=",
"self",
".",
"_collect_adjustments",
"(",
"requested_qtr_data",
",",
"sid",
",",
"sid_idx",
",",
"sid_estimates",
",",
"split_adjusted_asof_idx",
",",
"pre_adjustments",
",",
"post_adjustments",
",",
"requested_split_adjusted_columns",
")",
"self",
".",
"merge_split_adjustments_with_overwrites",
"(",
"pre_adjustments_dict",
",",
"post_adjustments_dict",
",",
"adjustments_for_sid",
",",
"requested_split_adjusted_columns",
")"
] | Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names. | [
"Collect",
"split",
"adjustments",
"for",
"previous",
"quarters",
"and",
"apply",
"them",
"to",
"the",
"given",
"dictionary",
"of",
"splits",
"for",
"the",
"given",
"sid",
".",
"Since",
"overwrites",
"just",
"replace",
"all",
"estimates",
"before",
"the",
"new",
"quarter",
"with",
"NaN",
"we",
"don",
"t",
"need",
"to",
"worry",
"about",
"re",
"-",
"applying",
"split",
"adjustments",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1342-L1401 |
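Before either loader can collect adjustments, split events must be partitioned around the split-adjusted-asof index: splits already reflected in the vendor's data versus splits that arrived afterward. A hedged sketch of that partitioning follows; the helper name, its signature, and the at-or-before boundary convention are illustrative assumptions, not zipline's actual API:

```python
import numpy as np

def partition_splits(split_date_indexes, split_ratios, split_adjusted_asof_idx):
    # Hypothetical helper: splits dated at or before the as-of index are
    # "pre" adjustments (already baked into the data), the rest are "post".
    idxs = np.asarray(split_date_indexes)
    ratios = np.asarray(split_ratios, dtype=float)
    is_pre = idxs <= split_adjusted_asof_idx
    return (ratios[is_pre], idxs[is_pre]), (ratios[~is_pre], idxs[~is_pre])

pre, post = partition_splits([2, 5, 9], [0.5, 2.0, 3.0], split_adjusted_asof_idx=5)
# pre  -> (array([0.5, 2. ]), array([2, 5]))
# post -> (array([3.]), array([9]))
```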
25,898 | quantopian/zipline | zipline/pipeline/loaders/earnings_estimates.py | NextSplitAdjustedEarningsEstimatesLoader.collect_split_adjustments | def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx \
and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) | python | def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx \
and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) | [
"def",
"collect_split_adjustments",
"(",
"self",
",",
"adjustments_for_sid",
",",
"requested_qtr_data",
",",
"dates",
",",
"sid",
",",
"sid_idx",
",",
"sid_estimates",
",",
"split_adjusted_asof_idx",
",",
"pre_adjustments",
",",
"post_adjustments",
",",
"requested_split_adjusted_columns",
")",
":",
"(",
"pre_adjustments_dict",
",",
"post_adjustments_dict",
")",
"=",
"self",
".",
"_collect_adjustments",
"(",
"requested_qtr_data",
",",
"sid",
",",
"sid_idx",
",",
"sid_estimates",
",",
"split_adjusted_asof_idx",
",",
"pre_adjustments",
",",
"post_adjustments",
",",
"requested_split_adjusted_columns",
",",
")",
"for",
"column_name",
"in",
"requested_split_adjusted_columns",
":",
"for",
"overwrite_ts",
"in",
"adjustments_for_sid",
"[",
"column_name",
"]",
":",
"# We need to cumulatively re-apply all adjustments up to the",
"# split-adjusted-asof-date. We might not have any",
"# pre-adjustments, so we should check for that.",
"if",
"overwrite_ts",
"<=",
"split_adjusted_asof_idx",
"and",
"pre_adjustments_dict",
":",
"for",
"split_ts",
"in",
"pre_adjustments_dict",
"[",
"column_name",
"]",
":",
"# The split has to have occurred during the span of",
"# the overwrite.",
"if",
"split_ts",
"<",
"overwrite_ts",
":",
"# Create new adjustments here so that we can",
"# re-apply all applicable adjustments to ONLY",
"# the dates being overwritten.",
"adjustments_for_sid",
"[",
"column_name",
"]",
"[",
"overwrite_ts",
"]",
".",
"extend",
"(",
"[",
"Float64Multiply",
"(",
"0",
",",
"overwrite_ts",
"-",
"1",
",",
"sid_idx",
",",
"sid_idx",
",",
"adjustment",
".",
"value",
")",
"for",
"adjustment",
"in",
"pre_adjustments_dict",
"[",
"column_name",
"]",
"[",
"split_ts",
"]",
"]",
")",
"# After the split-adjusted-asof-date, we need to re-apply all",
"# adjustments that occur after that date and within the",
"# bounds of the overwrite. They need to be applied starting",
"# from the first date and until an end date. The end date is",
"# the date of the newest information we get about",
"# `requested_quarter` that is >= `split_ts`, or if there is no",
"# new knowledge before `overwrite_ts`, then it is the date",
"# before `overwrite_ts`.",
"else",
":",
"# Overwrites happen at the first index of a new quarter,",
"# so determine here which quarter that is.",
"requested_quarter",
"=",
"requested_qtr_data",
"[",
"SHIFTED_NORMALIZED_QTRS",
",",
"sid",
"]",
".",
"iloc",
"[",
"overwrite_ts",
"]",
"for",
"adjustment_value",
",",
"date_index",
",",
"timestamp",
"in",
"zip",
"(",
"*",
"post_adjustments",
")",
":",
"if",
"split_adjusted_asof_idx",
"<",
"date_index",
"<",
"overwrite_ts",
":",
"# Assume the entire overwrite contains stale data",
"upper_bound",
"=",
"overwrite_ts",
"-",
"1",
"end_idx",
"=",
"self",
".",
"determine_end_idx_for_adjustment",
"(",
"timestamp",
",",
"dates",
",",
"upper_bound",
",",
"requested_quarter",
",",
"sid_estimates",
")",
"adjustments_for_sid",
"[",
"column_name",
"]",
"[",
"overwrite_ts",
"]",
".",
"append",
"(",
"Float64Multiply",
"(",
"0",
",",
"end_idx",
",",
"sid_idx",
",",
"sid_idx",
",",
"adjustment_value",
")",
")",
"self",
".",
"merge_split_adjustments_with_overwrites",
"(",
"pre_adjustments_dict",
",",
"post_adjustments_dict",
",",
"adjustments_for_sid",
",",
"requested_split_adjusted_columns",
")"
] | Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names. | [
"Collect",
"split",
"adjustments",
"for",
"future",
"quarters",
".",
"Re",
"-",
"apply",
"adjustments",
"that",
"would",
"be",
"overwritten",
"by",
"overwrites",
".",
"Merge",
"split",
"adjustments",
"with",
"overwrites",
"into",
"the",
"given",
"dictionary",
"of",
"splits",
"for",
"the",
"given",
"sid",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1407-L1534 |
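The re-application step above stacks one multiplier per qualifying split over the rows an overwrite is about to replace. A condensed sketch of that inner loop; `Float64Multiply` here is a plain namedtuple mirroring the shape of zipline's adjustment class, and the loop is simplified to the pre-asof branch only:

```python
from collections import namedtuple

# Mirrors the shape of zipline's Float64Multiply: multiply rows
# first_row..last_row of columns first_col..last_col by value.
Float64Multiply = namedtuple(
    'Float64Multiply',
    ['first_row', 'last_row', 'first_col', 'last_col', 'value'],
)

def reapply_pre_asof_splits(overwrite_ts, sid_idx, pre_splits):
    # For an overwrite starting at overwrite_ts, stack one multiply per
    # split that landed strictly inside the overwritten span, so the
    # overwritten rows end up on the same split basis as the rest of
    # the column. (Simplified from the loader's inner loop.)
    return [
        Float64Multiply(0, overwrite_ts - 1, sid_idx, sid_idx, ratio)
        for split_ts, ratio in pre_splits
        if split_ts < overwrite_ts
    ]

adjs = reapply_pre_asof_splits(
    overwrite_ts=6, sid_idx=0, pre_splits=[(2, 0.5), (4, 2.0), (8, 3.0)]
)
# Two multiplies over rows 0..5: value 0.5 (split at ts=2) and 2.0 (ts=4);
# the split at ts=8 falls outside the overwrite span and is skipped.
```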
25,899 | quantopian/zipline | zipline/pipeline/factors/basic.py | _ExponentialWeightedFactor.from_span | def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) | python | def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) | [
"def",
"from_span",
"(",
"cls",
",",
"inputs",
",",
"window_length",
",",
"span",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"span",
"<=",
"1",
":",
"raise",
"ValueError",
"(",
"\"`span` must be a positive number. %s was passed.\"",
"%",
"span",
")",
"decay_rate",
"=",
"(",
"1.0",
"-",
"(",
"2.0",
"/",
"(",
"1.0",
"+",
"span",
")",
")",
")",
"assert",
"0.0",
"<",
"decay_rate",
"<=",
"1.0",
"return",
"cls",
"(",
"inputs",
"=",
"inputs",
",",
"window_length",
"=",
"window_length",
",",
"decay_rate",
"=",
"decay_rate",
",",
"*",
"*",
"kwargs",
")"
] | Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`. | [
"Convenience",
"constructor",
"for",
"passing",
"decay_rate",
"in",
"terms",
"of",
"span",
"."
] | 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L198-L240 |
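For a concrete check of the `from_span` arithmetic: with span = 15, decay_rate = 1 - 2/(1 + 15) = 0.875, and a weighted mean under weights decay_rate**i (newest bar weighted 1) matches pandas' adjust=True EWMA over the same window, since pandas uses alpha = 2/(1 + span) and (1 - alpha) equals this decay_rate. A small sketch under those assumptions:

```python
import numpy as np
import pandas as pd

span = 15
window_length = 30
decay_rate = 1.0 - (2.0 / (1.0 + span))  # 0.875 for span=15

rng = np.random.default_rng(0)
closes = rng.random(window_length) * 100.0

# Oldest observation gets the smallest weight, newest gets weight 1.
weights = decay_rate ** np.arange(window_length - 1, -1, -1)
manual_ewma = np.average(closes, weights=weights)

# pandas adjust=True weights x_{t-i} by (1 - alpha)**i, and
# alpha = 2 / (1 + span), so (1 - alpha) == decay_rate above.
pandas_ewma = pd.Series(closes).ewm(span=span, adjust=True).mean().iloc[-1]

assert np.isclose(manual_ewma, pandas_ewma)
```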