def request_response(func: typing.Callable) -> ASGIApp:
    """
    Takes a function or coroutine `func(request) -> response`,
    and returns an ASGI application.
    """
    is_coroutine = asyncio.iscoroutinefunction(func)

    async def app(scope: Scope, receive: Receive, send: Send) -> None:
        request = Request(scope, receive=receive)
        if is_coroutine:
            response = await func(request)
        else:
            response = await run_in_threadpool(func, request)
        await response(scope, receive, send)

    return app
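Either calling convention works because the wrapper checks `asyncio.iscoroutinefunction` once at wrap time. A minimal usage sketch (assuming Starlette's `PlainTextResponse`; the endpoint names are illustrative):

# Sketch: both sync and async endpoints can be wrapped into ASGI apps.
from starlette.responses import PlainTextResponse

def sync_endpoint(request):
    # Plain function: executed via run_in_threadpool.
    return PlainTextResponse("Hello from a sync endpoint")

async def async_endpoint(request):
    # Coroutine: awaited directly on the event loop.
    return PlainTextResponse("Hello from an async endpoint")

asgi_app = request_response(sync_endpoint)   # either endpoint style works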
def websocket_session(func: typing.Callable) -> ASGIApp:
    """
    Takes a coroutine `func(session)`,
    and returns an ASGI application.
    """
    # assert asyncio.iscoroutinefunction(func), "WebSocket endpoints must be async"

    async def app(scope: Scope, receive: Receive, send: Send) -> None:
        session = WebSocket(scope, receive=receive, send=send)
        await func(session)

    return app
def compile_path(
    path: str,
) -> typing.Tuple[typing.Pattern, str, typing.Dict[str, Convertor]]:
    """
    Given a path string, like: "/{username:str}", return a three-tuple
    of (regex, format, {param_name:convertor}).

    regex:      "/(?P<username>[^/]+)"
    format:     "/{username}"
    convertors: {"username": StringConvertor()}
    """
    path_regex = "^"
    path_format = ""

    idx = 0
    param_convertors = {}
    for match in PARAM_REGEX.finditer(path):
        param_name, convertor_type = match.groups("str")
        convertor_type = convertor_type.lstrip(":")
        assert (
            convertor_type in CONVERTOR_TYPES
        ), f"Unknown path convertor '{convertor_type}'"
        convertor = CONVERTOR_TYPES[convertor_type]

        path_regex += path[idx : match.start()]
        path_regex += f"(?P<{param_name}>{convertor.regex})"

        path_format += path[idx : match.start()]
        path_format += "{%s}" % param_name

        param_convertors[param_name] = convertor

        idx = match.end()

    path_regex += path[idx:] + "$"
    path_format += path[idx:]

    return re.compile(path_regex), path_format, param_convertors
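A quick illustration of the returned three-tuple (a sketch; it assumes the module's `CONVERTOR_TYPES` table, where each convertor exposes a `regex` attribute and a `convert` method as used elsewhere in Starlette):

# Illustrative values; the exact regex comes from the convertor table.
regex, fmt, convertors = compile_path("/users/{user_id:int}")

match = regex.match("/users/42")
params = {key: convertors[key].convert(value)
          for key, value in match.groupdict().items()}
assert fmt.format(**params) == "/users/42"   # params == {"user_id": 42}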
def get_endpoints(
    self, routes: typing.List[BaseRoute]
) -> typing.List[EndpointInfo]:
    """
    Given the routes, returns a list of EndpointInfo containing:

    - path
        e.g.: /users/
    - http_method
        one of 'get', 'post', 'put', 'patch', 'delete', 'options'
    - func
        method ready to extract the docstring
    """
    endpoints_info: list = []

    for route in routes:
        if isinstance(route, Mount):
            # Recurse into mounted sub-routes, prefixing each sub-path with
            # the mount's own path. (Rebinding `routes` here shadows the
            # parameter; safe because the loop iterator is already bound.)
            routes = route.routes or []
            sub_endpoints = [
                EndpointInfo(
                    path="".join((route.path, sub_endpoint.path)),
                    http_method=sub_endpoint.http_method,
                    func=sub_endpoint.func,
                )
                for sub_endpoint in self.get_endpoints(routes)
            ]
            endpoints_info.extend(sub_endpoints)

        elif not isinstance(route, Route) or not route.include_in_schema:
            continue

        elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint):
            for method in route.methods or ["GET"]:
                if method == "HEAD":
                    continue
                endpoints_info.append(
                    EndpointInfo(route.path, method.lower(), route.endpoint)
                )
        else:
            for method in ["get", "post", "put", "patch", "delete", "options"]:
                if not hasattr(route.endpoint, method):
                    continue
                func = getattr(route.endpoint, method)
                endpoints_info.append(
                    EndpointInfo(route.path, method.lower(), func)
                )

    return endpoints_info
def parse_docstring(self, func_or_method: typing.Callable) -> dict:
    """
    Given a function, parse the docstring as YAML and return a dictionary of info.
    """
    docstring = func_or_method.__doc__
    if not docstring:
        return {}

    # We support having regular docstrings before the schema
    # definition. Here we return just the schema part from
    # the docstring.
    docstring = docstring.split("---")[-1]

    parsed = yaml.safe_load(docstring)

    if not isinstance(parsed, dict):
        # A regular docstring (not yaml formatted) can return
        # a simple string here, which wouldn't follow the schema.
        return {}

    return parsed
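For example, an endpoint docstring written like this (everything after the `---` separator is parsed as YAML; the prose before it is discarded by the split):

# Sketch of an endpoint carrying an OpenAPI-style schema in its docstring.
def list_users(request):
    """
    Return a collection of users.
    ---
    responses:
      200:
        description: A list of users.
    """

# parse_docstring(list_users) would return:
# {'responses': {200: {'description': 'A list of users.'}}}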
def get_directories(
    self, directory: str = None, packages: typing.List[str] = None
) -> typing.List[str]:
    """
    Given `directory` and `packages` arguments, return a list of all the
    directories that should be used for serving static files from.
    """
    directories = []
    if directory is not None:
        directories.append(directory)

    for package in packages or []:
        spec = importlib.util.find_spec(package)
        assert spec is not None, f"Package {package!r} could not be found."
        assert (
            spec.origin is not None
        ), f"Directory 'statics' in package {package!r} could not be found."
        directory = os.path.normpath(os.path.join(spec.origin, "..", "statics"))
        assert os.path.isdir(
            directory
        ), f"Directory 'statics' in package {package!r} could not be found."
        directories.append(directory)

    return directories
def get_path(self, scope: Scope) -> str:
    """
    Given the ASGI scope, return the `path` string to serve up,
    with OS specific path separators, and any '..', '.' components removed.
    """
    return os.path.normpath(os.path.join(*scope["path"].split("/")))
async def get_response(self, path: str, scope: Scope) -> Response:
    """
    Returns an HTTP response, given the incoming path, method and request headers.
    """
    if scope["method"] not in ("GET", "HEAD"):
        return PlainTextResponse("Method Not Allowed", status_code=405)

    if path.startswith(".."):
        # Most clients will normalize the path, so we shouldn't normally
        # get this, but don't allow misbehaving clients to break out of
        # the static files directory.
        return PlainTextResponse("Not Found", status_code=404)

    full_path, stat_result = await self.lookup_path(path)

    if stat_result and stat.S_ISREG(stat_result.st_mode):
        # We have a static file to serve.
        return self.file_response(full_path, stat_result, scope)

    elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
        # We're in HTML mode, and have got a directory URL.
        # Check if we have 'index.html' file to serve.
        index_path = os.path.join(path, "index.html")
        full_path, stat_result = await self.lookup_path(index_path)
        if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
            if not scope["path"].endswith("/"):
                # Directory URLs should redirect to always end in "/".
                url = URL(scope=scope)
                url = url.replace(path=url.path + "/")
                return RedirectResponse(url=url)
            return self.file_response(full_path, stat_result, scope)

    if self.html:
        # Check for '404.html' if we're in HTML mode.
        full_path, stat_result = await self.lookup_path("404.html")
        if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
            return self.file_response(
                full_path, stat_result, scope, status_code=404
            )

    return PlainTextResponse("Not Found", status_code=404)
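These handlers are normally reached by mounting Starlette's `StaticFiles` application under a path prefix; a minimal sketch (assuming a local `static/` directory exists):

from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.staticfiles import StaticFiles

app = Starlette(routes=[
    # html=True enables the index.html / 404.html behaviour shown above.
    Mount("/static", app=StaticFiles(directory="static", html=True)),
])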
async def check_config(self) -> None:
    """
    Perform a one-off configuration check that StaticFiles is actually
    pointed at a directory, so that we can raise loud errors rather than
    just returning 404 responses.
    """
    if self.directory is None:
        return

    try:
        stat_result = await aio_stat(self.directory)
    except FileNotFoundError:
        raise RuntimeError(
            f"StaticFiles directory '{self.directory}' does not exist."
        )
    if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
        raise RuntimeError(
            f"StaticFiles path '{self.directory}' is not a directory."
        )
def is_not_modified(
    self, response_headers: Headers, request_headers: Headers
) -> bool:
    """
    Given the request and response headers, return `True` if an HTTP
    "Not Modified" response could be returned instead.
    """
    try:
        if_none_match = request_headers["if-none-match"]
        etag = response_headers["etag"]
        if if_none_match == etag:
            return True
    except KeyError:
        pass

    try:
        if_modified_since = parsedate(request_headers["if-modified-since"])
        last_modified = parsedate(response_headers["last-modified"])
        if (
            if_modified_since is not None
            and last_modified is not None
            and if_modified_since >= last_modified
        ):
            return True
    except KeyError:
        pass

    return False
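For instance, when the client's validator matches the stored ETag, the first branch fires and the server may answer 304 instead of re-sending the file body. A sketch using Starlette's `Headers` (shown as a plain call for illustration, though above it is a method):

from starlette.datastructures import Headers

request_headers = Headers({"if-none-match": '"abc123"'})
response_headers = Headers({"etag": '"abc123"'})
# is_not_modified(response_headers, request_headers) -> True,
# so a 304 Not Modified can be returned instead of the file.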
def build_environ(scope: Scope, body: bytes) -> dict:
    """
    Builds a scope and request body into a WSGI environ object.
    """
    environ = {
        "REQUEST_METHOD": scope["method"],
        "SCRIPT_NAME": scope.get("root_path", ""),
        "PATH_INFO": scope["path"],
        "QUERY_STRING": scope["query_string"].decode("ascii"),
        "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": scope.get("scheme", "http"),
        "wsgi.input": io.BytesIO(body),
        "wsgi.errors": sys.stdout,
        "wsgi.multithread": True,
        "wsgi.multiprocess": True,
        "wsgi.run_once": False,
    }

    # Get server name and port - required in WSGI, not in ASGI
    server = scope.get("server") or ("localhost", 80)
    environ["SERVER_NAME"] = server[0]
    environ["SERVER_PORT"] = str(server[1])  # WSGI requires a string here

    # Get client IP address
    if scope.get("client"):
        environ["REMOTE_ADDR"] = scope["client"][0]

    # Go through headers and make them into environ entries
    for name, value in scope.get("headers", []):
        name = name.decode("latin1")
        if name == "content-length":
            corrected_name = "CONTENT_LENGTH"
        elif name == "content-type":
            corrected_name = "CONTENT_TYPE"
        else:
            corrected_name = f"HTTP_{name}".upper().replace("-", "_")
        # HTTPbis says only ASCII chars are allowed in headers,
        # but we decode latin1 just in case.
        value = value.decode("latin1")
        if corrected_name in environ:
            value = environ[corrected_name] + "," + value
        environ[corrected_name] = value
    return environ
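A small sketch of the translation for a bare-bones HTTP scope (all values here are illustrative):

scope = {
    "method": "GET",
    "path": "/ping",
    "root_path": "",
    "query_string": b"q=1",
    "http_version": "1.1",
    "scheme": "http",
    "headers": [(b"host", b"example.com"), (b"accept", b"*/*")],
    "server": ("example.com", 80),
    "client": ("127.0.0.1", 54321),
}
environ = build_environ(scope, b"")
# environ["PATH_INFO"] == "/ping"
# environ["HTTP_HOST"] == "example.com"
# environ["REMOTE_ADDR"] == "127.0.0.1"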
async def receive(self) -> Message:
    """
    Receive ASGI websocket messages, ensuring valid state transitions.
    """
    if self.client_state == WebSocketState.CONNECTING:
        message = await self._receive()
        message_type = message["type"]
        assert message_type == "websocket.connect"
        self.client_state = WebSocketState.CONNECTED
        return message
    elif self.client_state == WebSocketState.CONNECTED:
        message = await self._receive()
        message_type = message["type"]
        assert message_type in {"websocket.receive", "websocket.disconnect"}
        if message_type == "websocket.disconnect":
            self.client_state = WebSocketState.DISCONNECTED
        return message
    else:
        raise RuntimeError(
            'Cannot call "receive" once a disconnect message has been received.'
        )
async def send(self, message: Message) -> None:
    """
    Send ASGI websocket messages, ensuring valid state transitions.
    """
    if self.application_state == WebSocketState.CONNECTING:
        message_type = message["type"]
        assert message_type in {"websocket.accept", "websocket.close"}
        if message_type == "websocket.close":
            self.application_state = WebSocketState.DISCONNECTED
        else:
            self.application_state = WebSocketState.CONNECTED
        await self._send(message)
    elif self.application_state == WebSocketState.CONNECTED:
        message_type = message["type"]
        assert message_type in {"websocket.send", "websocket.close"}
        if message_type == "websocket.close":
            self.application_state = WebSocketState.DISCONNECTED
        await self._send(message)
    else:
        raise RuntimeError('Cannot call "send" once a close message has been sent.')
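Together, `receive` and `send` enforce the legal ASGI lifecycle on each side of the connection. A typical endpoint that stays within those transitions (a sketch, assuming Starlette's higher-level `WebSocket` helpers, which call `send`/`receive` internally):

from starlette.websockets import WebSocketDisconnect

async def echo(websocket):
    await websocket.accept()            # sends "websocket.accept"
    try:
        while True:
            text = await websocket.receive_text()   # "websocket.receive"
            await websocket.send_text(text)         # "websocket.send"
    except WebSocketDisconnect:
        pass                            # "websocket.disconnect" was received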
def get_top_long_short_abs(positions, top=10):
    """
    Finds the top long, short, and absolute positions.

    Parameters
    ----------
    positions : pd.DataFrame
        The positions that the strategy takes over time.
    top : int, optional
        How many of each to find (default 10).

    Returns
    -------
    df_top_long : pd.DataFrame
        Top long positions.
    df_top_short : pd.DataFrame
        Top short positions.
    df_top_abs : pd.DataFrame
        Top absolute positions.
    """
    positions = positions.drop('cash', axis='columns')
    df_max = positions.max()
    df_min = positions.min()
    df_abs_max = positions.abs().max()
    df_top_long = df_max[df_max > 0].nlargest(top)
    df_top_short = df_min[df_min < 0].nsmallest(top)
    df_top_abs = df_abs_max.nlargest(top)
    return df_top_long, df_top_short, df_top_abs
def get_max_median_position_concentration(positions):
    """
    Finds the max and median long and short position concentrations
    in each time period specified by the index of positions.

    Parameters
    ----------
    positions : pd.DataFrame
        The positions that the strategy takes over time.

    Returns
    -------
    pd.DataFrame
        Columns are max long, max short, median long, and median short
        position concentrations. Rows are time periods.
    """
    expos = get_percent_alloc(positions)
    expos = expos.drop('cash', axis=1)

    longs = expos.where(expos.applymap(lambda x: x > 0))
    shorts = expos.where(expos.applymap(lambda x: x < 0))

    alloc_summary = pd.DataFrame()
    alloc_summary['max_long'] = longs.max(axis=1)
    alloc_summary['median_long'] = longs.median(axis=1)
    alloc_summary['median_short'] = shorts.median(axis=1)
    alloc_summary['max_short'] = shorts.min(axis=1)

    return alloc_summary
def extract_pos(positions, cash):
    """
    Extract position values from backtest object as returned by
    get_backtest() on the Quantopian research platform.

    Parameters
    ----------
    positions : pd.DataFrame
        timeseries containing one row per symbol (and potentially
        duplicate datetime indices) and columns for amount and
        last_sale_price.
    cash : pd.Series
        timeseries containing cash in the portfolio.

    Returns
    -------
    pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.
    """
    positions = positions.copy()
    positions['values'] = positions.amount * positions.last_sale_price

    cash.name = 'cash'

    values = positions.reset_index().pivot_table(index='index',
                                                 columns='sid',
                                                 values='values')

    if ZIPLINE:
        for asset in values.columns:
            if type(asset) in [Equity, Future]:
                values[asset] = values[asset] * asset.price_multiplier

    values = values.join(cash).fillna(0)

    # NOTE: Set name of DataFrame.columns to sid, to match the behavior
    # of DataFrame.join in earlier versions of pandas.
    values.columns.name = 'sid'

    return values
def get_sector_exposures(positions, symbol_sector_map):
    """
    Sum position exposures by sector.

    Parameters
    ----------
    positions : pd.DataFrame
        Contains position values or amounts.
        - Example
            index         'AAPL'       'MSFT'        'CHK'       cash
            2004-01-09    13939.380    -15012.993    -403.870    1477.483
            2004-01-12    14492.630    -18624.870     142.630    3989.610
            2004-01-13   -13853.280     13653.640    -100.980     100.000
    symbol_sector_map : dict or pd.Series
        Security identifier to sector mapping.
        Security ids as keys/index, sectors as values.
        - Example:
            {'AAPL' : 'Technology'
             'MSFT' : 'Technology'
             'CHK' : 'Natural Resources'}

    Returns
    -------
    sector_exp : pd.DataFrame
        Sectors and their allocations.
        - Example:
            index         'Technology'    'Natural Resources'    cash
            2004-01-09    -1073.613       -403.870               1477.4830
            2004-01-12    -4132.240        142.630               3989.6100
            2004-01-13     -199.640       -100.980                100.0000
    """
    cash = positions['cash']
    positions = positions.drop('cash', axis=1)

    unmapped_pos = np.setdiff1d(positions.columns.values,
                                list(symbol_sector_map.keys()))
    if len(unmapped_pos) > 0:
        warn_message = """Warning: Symbols {} have no sector mapping.
        They will not be included in sector allocations""".format(
            ", ".join(map(str, unmapped_pos)))
        warnings.warn(warn_message, UserWarning)

    sector_exp = positions.groupby(
        by=symbol_sector_map, axis=1).sum()

    sector_exp['cash'] = cash

    return sector_exp
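Using the example data from the docstring above (hypothetical values, assuming a pandas version contemporary with pyfolio, where `groupby(..., axis=1)` is supported):

import pandas as pd

positions = pd.DataFrame(
    {'AAPL': [13939.380], 'MSFT': [-15012.993],
     'CHK': [-403.870], 'cash': [1477.483]},
    index=pd.to_datetime(['2004-01-09']))
sector_map = {'AAPL': 'Technology', 'MSFT': 'Technology',
              'CHK': 'Natural Resources'}
print(get_sector_exposures(positions, sector_map))
#             Natural Resources  Technology      cash
# 2004-01-09            -403.87   -1073.613  1477.483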
def get_long_short_pos(positions):
    """
    Determines the long and short allocations in a portfolio.

    Parameters
    ----------
    positions : pd.DataFrame
        The positions that the strategy takes over time.

    Returns
    -------
    df_long_short : pd.DataFrame
        Long and short allocations as a decimal
        percentage of the total net liquidation
    """
    pos_wo_cash = positions.drop('cash', axis=1)
    longs = pos_wo_cash[pos_wo_cash > 0].sum(axis=1).fillna(0)
    shorts = pos_wo_cash[pos_wo_cash < 0].sum(axis=1).fillna(0)
    cash = positions.cash
    net_liquidation = longs + shorts + cash
    df_pos = pd.DataFrame({'long': longs.divide(net_liquidation,
                                                axis='index'),
                           'short': shorts.divide(net_liquidation,
                                                  axis='index')})
    df_pos['net exposure'] = df_pos['long'] + df_pos['short']
    return df_pos
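A tiny worked example (hypothetical numbers): a portfolio that is $150 long and $50 short with $100 cash has a net liquidation value of 150 - 50 + 100 = 200, so the long allocation is 0.75 and the short allocation is -0.25:

import pandas as pd

positions = pd.DataFrame(
    {'AAPL': [150.0], 'MSFT': [-50.0], 'cash': [100.0]},
    index=pd.to_datetime(['2004-01-09']))
print(get_long_short_pos(positions))
#             long  short  net exposure
# 2004-01-09  0.75  -0.25           0.5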
def compute_style_factor_exposures(positions, risk_factor):
    """
    Returns style factor exposure of an algorithm's positions

    Parameters
    ----------
    positions : pd.DataFrame
        Daily equity positions of algorithm, in dollars.
        - See full explanation in create_risk_tear_sheet

    risk_factor : pd.DataFrame
        Daily risk factor per asset.
        - DataFrame with dates as index and equities as columns
        - Example:
                          Equity(24    Equity(62
                            [AAPL])       [ABT])
            2017-04-03    -0.51284      1.39173
            2017-04-04    -0.73381      0.98149
            2017-04-05    -0.90132      1.13981
    """

    positions_wo_cash = positions.drop('cash', axis='columns')
    gross_exposure = positions_wo_cash.abs().sum(axis='columns')

    style_factor_exposure = positions_wo_cash.multiply(risk_factor) \
        .divide(gross_exposure, axis='index')
    tot_style_factor_exposure = style_factor_exposure.sum(axis='columns',
                                                          skipna=True)

    return tot_style_factor_exposure
def plot_style_factor_exposures(tot_style_factor_exposure, factor_name=None,
                                ax=None):
    """
    Plots DataFrame output of compute_style_factor_exposures as a line graph

    Parameters
    ----------
    tot_style_factor_exposure : pd.Series
        Daily style factor exposures (output of compute_style_factor_exposures)
        - Time series with decimal style factor exposures
        - Example:
            2017-04-24    0.037820
            2017-04-25    0.016413
            2017-04-26   -0.021472
            2017-04-27   -0.024859

    factor_name : string
        Name of style factor, for use in graph title
        - Defaults to tot_style_factor_exposure.name
    """

    if ax is None:
        ax = plt.gca()

    if factor_name is None:
        factor_name = tot_style_factor_exposure.name

    ax.plot(tot_style_factor_exposure.index, tot_style_factor_exposure,
            label=factor_name)
    avg = tot_style_factor_exposure.mean()
    ax.axhline(avg, linestyle='-.', label='Mean = {:.3}'.format(avg))
    ax.axhline(0, color='k', linestyle='-')

    _, _, y1, y2 = plt.axis()
    lim = max(abs(y1), abs(y2))
    ax.set(title='Exposure to {}'.format(factor_name),
           ylabel='{} \n weighted exposure'.format(factor_name),
           ylim=(-lim, lim))
    ax.legend(frameon=True, framealpha=0.5)

    return ax
def compute_sector_exposures(positions, sectors, sector_dict=SECTORS):
    """
    Returns arrays of long, short and gross sector exposures of an
    algorithm's positions

    Parameters
    ----------
    positions : pd.DataFrame
        Daily equity positions of algorithm, in dollars.
        - See full explanation in compute_style_factor_exposures.

    sectors : pd.DataFrame
        Daily Morningstar sector code per asset
        - See full explanation in create_risk_tear_sheet

    sector_dict : dict or OrderedDict
        Dictionary of all sectors
        - Keys are sector codes (e.g. ints or strings) and values are
          sector names (which must be strings)
        - Defaults to Morningstar sectors
    """
    sector_ids = sector_dict.keys()

    long_exposures = []
    short_exposures = []
    gross_exposures = []
    net_exposures = []

    positions_wo_cash = positions.drop('cash', axis='columns')

    long_exposure = positions_wo_cash[positions_wo_cash > 0] \
        .sum(axis='columns')
    short_exposure = positions_wo_cash[positions_wo_cash < 0] \
        .abs().sum(axis='columns')
    gross_exposure = positions_wo_cash.abs().sum(axis='columns')

    for sector_id in sector_ids:
        in_sector = positions_wo_cash[sectors == sector_id]

        long_sector = in_sector[in_sector > 0] \
            .sum(axis='columns').divide(long_exposure)
        short_sector = in_sector[in_sector < 0] \
            .sum(axis='columns').divide(short_exposure)
        gross_sector = in_sector.abs().sum(axis='columns') \
            .divide(gross_exposure)
        net_sector = long_sector.subtract(short_sector)

        long_exposures.append(long_sector)
        short_exposures.append(short_sector)
        gross_exposures.append(gross_sector)
        net_exposures.append(net_sector)

    return long_exposures, short_exposures, gross_exposures, net_exposures
def plot_sector_exposures_longshort(long_exposures, short_exposures,
                                    sector_dict=SECTORS, ax=None):
    """
    Plots outputs of compute_sector_exposures as area charts

    Parameters
    ----------
    long_exposures, short_exposures : arrays
        Arrays of long and short sector exposures (output of
        compute_sector_exposures).

    sector_dict : dict or OrderedDict
        Dictionary of all sectors
        - See full description in compute_sector_exposures
    """
    if ax is None:
        ax = plt.gca()

    if sector_dict is None:
        sector_names = SECTORS.values()
    else:
        sector_names = sector_dict.values()

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 11))

    ax.stackplot(long_exposures[0].index, long_exposures,
                 labels=sector_names, colors=color_list, alpha=0.8,
                 baseline='zero')
    ax.stackplot(long_exposures[0].index, short_exposures,
                 colors=color_list, alpha=0.8, baseline='zero')
    ax.axhline(0, color='k', linestyle='-')
    ax.set(title='Long and short exposures to sectors',
           ylabel='Proportion of long/short exposure in sectors')
    ax.legend(loc='upper left', frameon=True, framealpha=0.5)

    return ax
def plot_sector_exposures_gross(gross_exposures, sector_dict=None, ax=None):
    """
    Plots output of compute_sector_exposures as area charts

    Parameters
    ----------
    gross_exposures : arrays
        Arrays of gross sector exposures (output of compute_sector_exposures).

    sector_dict : dict or OrderedDict
        Dictionary of all sectors
        - See full description in compute_sector_exposures
    """
    if ax is None:
        ax = plt.gca()

    if sector_dict is None:
        sector_names = SECTORS.values()
    else:
        sector_names = sector_dict.values()

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 11))

    ax.stackplot(gross_exposures[0].index, gross_exposures,
                 labels=sector_names, colors=color_list, alpha=0.8,
                 baseline='zero')
    ax.axhline(0, color='k', linestyle='-')
    ax.set(title='Gross exposure to sectors',
           ylabel='Proportion of gross exposure \n in sectors')

    return ax
def plot_sector_exposures_net(net_exposures, sector_dict=None, ax=None):
    """
    Plots output of compute_sector_exposures as line graphs

    Parameters
    ----------
    net_exposures : arrays
        Arrays of net sector exposures (output of compute_sector_exposures).

    sector_dict : dict or OrderedDict
        Dictionary of all sectors
        - See full description in compute_sector_exposures
    """
    if ax is None:
        ax = plt.gca()

    # Wrap in list() so the names are indexable under Python 3,
    # where dict.values() returns a view.
    if sector_dict is None:
        sector_names = list(SECTORS.values())
    else:
        sector_names = list(sector_dict.values())

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 11))

    for i in range(len(net_exposures)):
        ax.plot(net_exposures[i], color=color_list[i], alpha=0.8,
                label=sector_names[i])
    ax.set(title='Net exposures to sectors',
           ylabel='Proportion of net exposure \n in sectors')

    return ax
def compute_cap_exposures(positions, caps):
    """
    Returns arrays of long, short and gross market cap exposures of an
    algorithm's positions

    Parameters
    ----------
    positions : pd.DataFrame
        Daily equity positions of algorithm, in dollars.
        - See full explanation in compute_style_factor_exposures.

    caps : pd.DataFrame
        Daily market cap per asset
        - See full explanation in create_risk_tear_sheet
    """
    long_exposures = []
    short_exposures = []
    gross_exposures = []
    net_exposures = []

    positions_wo_cash = positions.drop('cash', axis='columns')
    tot_gross_exposure = positions_wo_cash.abs().sum(axis='columns')
    tot_long_exposure = positions_wo_cash[positions_wo_cash > 0] \
        .sum(axis='columns')
    tot_short_exposure = positions_wo_cash[positions_wo_cash < 0] \
        .abs().sum(axis='columns')

    for bucket_name, boundaries in CAP_BUCKETS.items():
        in_bucket = positions_wo_cash[(caps >= boundaries[0]) &
                                      (caps <= boundaries[1])]

        gross_bucket = in_bucket.abs().sum(axis='columns') \
            .divide(tot_gross_exposure)
        long_bucket = in_bucket[in_bucket > 0] \
            .sum(axis='columns').divide(tot_long_exposure)
        short_bucket = in_bucket[in_bucket < 0] \
            .sum(axis='columns').divide(tot_short_exposure)
        net_bucket = long_bucket.subtract(short_bucket)

        gross_exposures.append(gross_bucket)
        long_exposures.append(long_bucket)
        short_exposures.append(short_bucket)
        net_exposures.append(net_bucket)

    return long_exposures, short_exposures, gross_exposures, net_exposures
def plot_cap_exposures_longshort(long_exposures, short_exposures, ax=None):
    """
    Plots outputs of compute_cap_exposures as area charts

    Parameters
    ----------
    long_exposures, short_exposures : arrays
        Arrays of long and short market cap exposures (output of
        compute_cap_exposures).
    """
    if ax is None:
        ax = plt.gca()

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 5))

    ax.stackplot(long_exposures[0].index, long_exposures,
                 labels=CAP_BUCKETS.keys(), colors=color_list, alpha=0.8,
                 baseline='zero')
    ax.stackplot(long_exposures[0].index, short_exposures,
                 colors=color_list, alpha=0.8, baseline='zero')
    ax.axhline(0, color='k', linestyle='-')
    ax.set(title='Long and short exposures to market caps',
           ylabel='Proportion of long/short exposure in market cap buckets')
    ax.legend(loc='upper left', frameon=True, framealpha=0.5)

    return ax
def plot_cap_exposures_gross(gross_exposures, ax=None):
    """
    Plots outputs of compute_cap_exposures as area charts

    Parameters
    ----------
    gross_exposures : array
        Arrays of gross market cap exposures (output of
        compute_cap_exposures).
    """
    if ax is None:
        ax = plt.gca()

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 5))

    ax.stackplot(gross_exposures[0].index, gross_exposures,
                 labels=CAP_BUCKETS.keys(), colors=color_list, alpha=0.8,
                 baseline='zero')
    ax.axhline(0, color='k', linestyle='-')
    ax.set(title='Gross exposure to market caps',
           ylabel='Proportion of gross exposure \n in market cap buckets')

    return ax
def plot_cap_exposures_net(net_exposures, ax=None):
    """
    Plots outputs of compute_cap_exposures as line graphs

    Parameters
    ----------
    net_exposures : array
        Arrays of net market cap exposures (output of compute_cap_exposures).
    """
    if ax is None:
        ax = plt.gca()

    color_list = plt.cm.gist_rainbow(np.linspace(0, 1, 5))

    # list() makes the bucket names indexable under Python 3.
    cap_names = list(CAP_BUCKETS.keys())
    for i in range(len(net_exposures)):
        ax.plot(net_exposures[i], color=color_list[i], alpha=0.8,
                label=cap_names[i])
    ax.axhline(0, color='k', linestyle='-')
    ax.set(title='Net exposure to market caps',
           ylabel='Proportion of net exposure \n in market cap buckets')

    return ax
def compute_volume_exposures(shares_held, volumes, percentile):
    """
    Returns arrays of pth percentile of long, short and gross volume exposures
    of an algorithm's held shares

    Parameters
    ----------
    shares_held : pd.DataFrame
        Daily number of shares held by an algorithm.
        - See full explanation in create_risk_tear_sheet

    volumes : pd.DataFrame
        Daily volume per asset
        - See full explanation in create_risk_tear_sheet

    percentile : float
        Percentile to use when computing and plotting volume exposures
        - See full explanation in create_risk_tear_sheet
    """
    shares_held = shares_held.replace(0, np.nan)

    shares_longed = shares_held[shares_held > 0]
    shares_shorted = -1 * shares_held[shares_held < 0]
    shares_grossed = shares_held.abs()

    longed_frac = shares_longed.divide(volumes)
    shorted_frac = shares_shorted.divide(volumes)
    grossed_frac = shares_grossed.divide(volumes)

    # NOTE: To work around a bug in `quantile` with nan-handling in
    # pandas 0.18, use np.nanpercentile by applying to each row of
    # the dataframe. This is fixed in pandas 0.19.
    #
    # longed_threshold = 100*longed_frac.quantile(percentile, axis='columns')
    # shorted_threshold = 100*shorted_frac.quantile(percentile, axis='columns')
    # grossed_threshold = 100*grossed_frac.quantile(percentile, axis='columns')

    longed_threshold = 100 * longed_frac.apply(
        partial(np.nanpercentile, q=100 * percentile),
        axis='columns',
    )
    shorted_threshold = 100 * shorted_frac.apply(
        partial(np.nanpercentile, q=100 * percentile),
        axis='columns',
    )
    grossed_threshold = 100 * grossed_frac.apply(
        partial(np.nanpercentile, q=100 * percentile),
        axis='columns',
    )

    return longed_threshold, shorted_threshold, grossed_threshold
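The commented-out `quantile` calls are equivalent on pandas >= 0.19; the `apply`/`np.nanpercentile` form is the row-wise workaround. A small sanity check of that equivalence (hypothetical numbers, assuming a modern pandas):

from functools import partial

import numpy as np
import pandas as pd

frac = pd.DataFrame({'A': [0.01, 0.03], 'B': [0.02, np.nan]})
via_apply = 100 * frac.apply(partial(np.nanpercentile, q=95), axis='columns')
via_quantile = 100 * frac.quantile(0.95, axis='columns')
assert np.allclose(via_apply, via_quantile)   # NaNs are skipped either way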
def plot_volume_exposures_longshort(longed_threshold, shorted_threshold,
                                    percentile, ax=None):
    """
    Plots outputs of compute_volume_exposures as line graphs

    Parameters
    ----------
    longed_threshold, shorted_threshold : pd.Series
        Series of longed and shorted volume exposures (output of
        compute_volume_exposures).

    percentile : float
        Percentile to use when computing and plotting volume exposures.
        - See full explanation in create_risk_tear_sheet
    """
    if ax is None:
        ax = plt.gca()

    ax.plot(longed_threshold.index, longed_threshold,
            color='b', label='long')
    ax.plot(shorted_threshold.index, shorted_threshold,
            color='r', label='short')
    ax.axhline(0, color='k')
    ax.set(title='Long and short exposures to illiquidity',
           ylabel='{}th percentile of proportion of volume (%)'
           .format(100 * percentile))
    ax.legend(frameon=True, framealpha=0.5)

    return ax
def plot_volume_exposures_gross(grossed_threshold, percentile, ax=None):
    """
    Plots outputs of compute_volume_exposures as line graphs

    Parameters
    ----------
    grossed_threshold : pd.Series
        Series of grossed volume exposures (output of
        compute_volume_exposures).

    percentile : float
        Percentile to use when computing and plotting volume exposures
        - See full explanation in create_risk_tear_sheet
    """
    if ax is None:
        ax = plt.gca()

    ax.plot(grossed_threshold.index, grossed_threshold,
            color='b', label='gross')
    ax.axhline(0, color='k')
    ax.set(title='Gross exposure to illiquidity',
           ylabel='{}th percentile of \n proportion of volume (%)'
           .format(100 * percentile))
    ax.legend(frameon=True, framealpha=0.5)

    return ax
def create_full_tear_sheet(returns,
                           positions=None,
                           transactions=None,
                           market_data=None,
                           benchmark_rets=None,
                           slippage=None,
                           live_start_date=None,
                           sector_mappings=None,
                           bayesian=False,
                           round_trips=False,
                           estimate_intraday='infer',
                           hide_positions=False,
                           cone_std=(1.0, 1.5, 2.0),
                           bootstrap=False,
                           unadjusted_returns=None,
                           style_factor_panel=None,
                           sectors=None,
                           caps=None,
                           shares_held=None,
                           volumes=None,
                           percentile=None,
                           turnover_denom='AGB',
                           set_context=True,
                           factor_returns=None,
                           factor_loadings=None,
                           pos_in_dollars=True,
                           header_rows=None,
                           factor_partitions=FACTOR_PARTITIONS):
    """
    Generate a number of tear sheets that are useful
    for analyzing a strategy's performance.

    - Fetches benchmarks if needed.
    - Creates tear sheets for returns, and significant events.
      If possible, also creates tear sheets for position analysis,
      transaction analysis, and Bayesian analysis.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - Time series with decimal returns.
        - Example:
            2015-07-16    -0.012143
            2015-07-17    0.045350
            2015-07-20    0.030957
            2015-07-21    0.004902
    positions : pd.DataFrame, optional
        Daily net position values.
        - Time series of dollar amount invested in each position and cash.
        - Days where stocks are not held can be represented by 0 or NaN.
        - Non-working capital is labelled 'cash'
        - Example:
            index         'AAPL'         'MSFT'          cash
            2004-01-09    13939.3800     -14012.9930     711.5585
            2004-01-12    14492.6300     -14624.8700     27.1821
            2004-01-13    -13853.2800    13653.6400      -43.6375
    transactions : pd.DataFrame, optional
        Executed trade volumes and fill prices.
        - One row per trade.
        - Trades on different names that occur at the
          same time will have identical indices.
        - Example:
            index                  amount   price    symbol
            2004-01-09 12:18:01    483      324.12   'AAPL'
            2004-01-09 12:18:01    122      83.10    'MSFT'
            2004-01-13 14:12:23    -75      340.43   'AAPL'
    market_data : pd.Panel, optional
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the passed
        positions DataFrame (same dates and symbols).
    slippage : int/float, optional
        Basis points of slippage to apply to returns before generating
        tearsheet stats and plots.
        If a value is provided, slippage parameter sweep
        plots will be generated from the unadjusted returns.
        Transactions and positions must also be passed.
        - See txn.adjust_returns_for_slippage for more details.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading,
        after its backtest period. This datetime should be normalized.
    hide_positions : bool, optional
        If True, will not output any symbol names.
    bayesian : boolean, optional
        If True, causes the generation of a Bayesian tear sheet.
    round_trips : boolean, optional
        If True, causes the generation of a round trip tear sheet.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    estimate_intraday : boolean or str, optional
        Instead of using the end-of-day positions, use the point in the day
        where we have the most $ invested. This will adjust positions to
        better approximate and represent how an intraday strategy behaves.
        By default, this is 'infer', and an attempt will be made to detect
        an intraday strategy. Specifying this value will prevent detection.
    cone_std : float, or tuple, optional
        If float, The standard deviation to use for the cone plots.
        If tuple, Tuple of standard deviation values to use for the cone plots
        - The cone is a normal distribution with this standard deviation
          centered around a linear regression.
    bootstrap : boolean, optional
        Whether to perform bootstrap analysis for the performance
        metrics. Takes a few minutes longer.
    turnover_denom : str
        Either AGB or portfolio_value, default AGB.
        - See full explanation in txn.get_turnover.
    factor_returns : pd.Dataframe, optional
        Returns by factor, with date as index and factors as columns
    factor_loadings : pd.Dataframe, optional
        Factor loadings for all days in the date range, with date and
        ticker as index, and factors as columns.
    pos_in_dollars : boolean, optional
        indicates whether positions is in dollars
    header_rows : dict or OrderedDict, optional
        Extra rows to display at the top of the perf stats table.
    set_context : boolean, optional
        If True, set default plotting style context.
        - See plotting.context().
    factor_partitions : dict, optional
        dict specifying how factors should be separated in perf attrib
        factor returns and risk exposures plots
        - See create_perf_attrib_tear_sheet().
    """

    if (unadjusted_returns is None) and (slippage is not None) and\
       (transactions is not None):
        unadjusted_returns = returns.copy()
        returns = txn.adjust_returns_for_slippage(returns, positions,
                                                  transactions, slippage)

    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    create_returns_tear_sheet(
        returns,
        positions=positions,
        transactions=transactions,
        live_start_date=live_start_date,
        cone_std=cone_std,
        benchmark_rets=benchmark_rets,
        bootstrap=bootstrap,
        turnover_denom=turnover_denom,
        header_rows=header_rows,
        set_context=set_context)

    create_interesting_times_tear_sheet(returns,
                                        benchmark_rets=benchmark_rets,
                                        set_context=set_context)

    if positions is not None:
        create_position_tear_sheet(returns, positions,
                                   hide_positions=hide_positions,
                                   set_context=set_context,
                                   sector_mappings=sector_mappings,
                                   estimate_intraday=False)

        if transactions is not None:
            create_txn_tear_sheet(returns, positions, transactions,
                                  unadjusted_returns=unadjusted_returns,
                                  estimate_intraday=False,
                                  set_context=set_context)
            if round_trips:
                create_round_trip_tear_sheet(
                    returns=returns,
                    positions=positions,
                    transactions=transactions,
                    sector_mappings=sector_mappings,
                    estimate_intraday=False)

            if market_data is not None:
                create_capacity_tear_sheet(returns, positions, transactions,
                                           market_data,
                                           liquidation_daily_vol_limit=0.2,
                                           last_n_days=125,
                                           estimate_intraday=False)

        if style_factor_panel is not None:
            create_risk_tear_sheet(positions, style_factor_panel, sectors,
                                   caps, shares_held, volumes, percentile)

        if factor_returns is not None and factor_loadings is not None:
            create_perf_attrib_tear_sheet(returns, positions, factor_returns,
                                          factor_loadings, transactions,
                                          pos_in_dollars=pos_in_dollars,
                                          factor_partitions=factor_partitions)

    if bayesian:
        create_bayesian_tear_sheet(returns,
                                   live_start_date=live_start_date,
                                   benchmark_rets=benchmark_rets,
                                   set_context=set_context)
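A typical invocation looks like the following (a sketch; it assumes `returns`, `positions`, and `transactions` are already loaded in the formats documented above):

import pyfolio as pf

pf.create_full_tear_sheet(returns,
                          positions=positions,
                          transactions=transactions,
                          live_start_date='2015-07-16',
                          round_trips=True)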
def create_simple_tear_sheet(returns,
                             positions=None,
                             transactions=None,
                             benchmark_rets=None,
                             slippage=None,
                             estimate_intraday='infer',
                             live_start_date=None,
                             turnover_denom='AGB',
                             header_rows=None):
    """
    Simpler version of create_full_tear_sheet; generates summary performance
    statistics and important plots as a single image.

    - Plots: cumulative returns, rolling beta, rolling Sharpe, underwater,
      exposure, top 10 holdings, total holdings, long/short holdings,
      daily turnover, transaction time distribution.
    - Never accept market_data input (market_data = None)
    - Never accept sector_mappings input (sector_mappings = None)
    - Never perform bootstrap analysis (bootstrap = False)
    - Never hide positions on top 10 holdings plot (hide_positions = False)
    - Always use default cone_std (cone_std = (1.0, 1.5, 2.0))

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - Time series with decimal returns.
        - Example:
            2015-07-16    -0.012143
            2015-07-17    0.045350
            2015-07-20    0.030957
            2015-07-21    0.004902
    positions : pd.DataFrame, optional
        Daily net position values.
        - Time series of dollar amount invested in each position and cash.
        - Days where stocks are not held can be represented by 0 or NaN.
        - Non-working capital is labelled 'cash'
        - Example:
            index         'AAPL'         'MSFT'          cash
            2004-01-09    13939.3800     -14012.9930     711.5585
            2004-01-12    14492.6300     -14624.8700     27.1821
            2004-01-13    -13853.2800    13653.6400      -43.6375
    transactions : pd.DataFrame, optional
        Executed trade volumes and fill prices.
        - One row per trade.
        - Trades on different names that occur at the
          same time will have identical indices.
        - Example:
            index                  amount   price    symbol
            2004-01-09 12:18:01    483      324.12   'AAPL'
            2004-01-09 12:18:01    122      83.10    'MSFT'
            2004-01-13 14:12:23    -75      340.43   'AAPL'
    benchmark_rets : pd.Series, optional
        Daily returns of the benchmark, noncumulative.
    slippage : int/float, optional
        Basis points of slippage to apply to returns before generating
        tearsheet stats and plots.
        If a value is provided, slippage parameter sweep
        plots will be generated from the unadjusted returns.
        Transactions and positions must also be passed.
        - See txn.adjust_returns_for_slippage for more details.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading,
        after its backtest period. This datetime should be normalized.
    turnover_denom : str, optional
        Either AGB or portfolio_value, default AGB.
        - See full explanation in txn.get_turnover.
    header_rows : dict or OrderedDict, optional
        Extra rows to display at the top of the perf stats table.
    set_context : boolean, optional
        If True, set default plotting style context.
    """
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    if (slippage is not None) and (transactions is not None):
        returns = txn.adjust_returns_for_slippage(returns, positions,
                                                  transactions, slippage)

    always_sections = 4
    positions_sections = 4 if positions is not None else 0
    transactions_sections = 2 if transactions is not None else 0
    live_sections = 1 if live_start_date is not None else 0
    benchmark_sections = 1 if benchmark_rets is not None else 0

    vertical_sections = sum([
        always_sections,
        positions_sections,
        transactions_sections,
        live_sections,
        benchmark_sections,
    ])

    if live_start_date is not None:
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)

    plotting.show_perf_stats(returns,
                             benchmark_rets,
                             positions=positions,
                             transactions=transactions,
                             turnover_denom=turnover_denom,
                             live_start_date=live_start_date,
                             header_rows=header_rows)

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)

    ax_rolling_returns = plt.subplot(gs[:2, :])
    i = 2
    if benchmark_rets is not None:
        ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
        i += 1
    ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1

    plotting.plot_rolling_returns(returns,
                                  factor_returns=benchmark_rets,
                                  live_start_date=live_start_date,
                                  cone_std=(1.0, 1.5, 2.0),
                                  ax=ax_rolling_returns)
    ax_rolling_returns.set_title('Cumulative returns')

    if benchmark_rets is not None:
        plotting.plot_rolling_beta(returns, benchmark_rets,
                                   ax=ax_rolling_beta)

    plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)

    plotting.plot_drawdown_underwater(returns, ax=ax_underwater)

    if positions is not None:
        # Plot simple positions tear sheet
        ax_exposures = plt.subplot(gs[i, :])
        i += 1
        ax_top_positions = plt.subplot(gs[i, :], sharex=ax_exposures)
        i += 1
        ax_holdings = plt.subplot(gs[i, :], sharex=ax_exposures)
        i += 1
        ax_long_short_holdings = plt.subplot(gs[i, :])
        i += 1

        positions_alloc = pos.get_percent_alloc(positions)

        plotting.plot_exposures(returns, positions, ax=ax_exposures)

        plotting.show_and_plot_top_positions(returns,
                                             positions_alloc,
                                             show_and_plot=0,
                                             hide_positions=False,
                                             ax=ax_top_positions)

        plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)

        plotting.plot_long_short_holdings(returns, positions_alloc,
                                          ax=ax_long_short_holdings)

        if transactions is not None:
            # Plot simple transactions tear sheet
            ax_turnover = plt.subplot(gs[i, :])
            i += 1
            ax_txn_timings = plt.subplot(gs[i, :])
            i += 1

            plotting.plot_turnover(returns,
                                   transactions,
                                   positions,
                                   ax=ax_turnover)

            plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
def create_returns_tear_sheet(returns, positions=None,
                              transactions=None,
                              live_start_date=None,
                              cone_std=(1.0, 1.5, 2.0),
                              benchmark_rets=None,
                              bootstrap=False,
                              turnover_denom='AGB',
                              header_rows=None,
                              return_fig=False):
    """
    Generate a number of plots for analyzing a strategy's returns.

    - Fetches benchmarks, then creates the plots on a single figure.
    - Plots: rolling returns (with cone), rolling beta, rolling Sharpe,
      rolling Fama-French risk factors, drawdowns, underwater plot, monthly
      and annual return plots, daily similarity plots, and return quantile
      box plot.
    - Will also print the start and end dates of the strategy,
      performance statistics, drawdown periods, and the return range.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame, optional
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame, optional
        Executed trade volumes and fill prices.
        - See full explanation in create_full_tear_sheet.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading,
        after its backtest period.
    cone_std : float, or tuple, optional
        If float, The standard deviation to use for the cone plots.
        If tuple, Tuple of standard deviation values to use for the cone plots
        - The cone is a normal distribution with this standard deviation
          centered around a linear regression.
    benchmark_rets : pd.Series, optional
        Daily noncumulative returns of the benchmark.
        - This is in the same style as returns.
    bootstrap : boolean, optional
        Whether to perform bootstrap analysis for the performance
        metrics. Takes a few minutes longer.
    turnover_denom : str, optional
        Either AGB or portfolio_value, default AGB.
        - See full explanation in txn.get_turnover.
    header_rows : dict or OrderedDict, optional
        Extra rows to display at the top of the perf stats table.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    if benchmark_rets is not None:
        returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)

    plotting.show_perf_stats(returns, benchmark_rets,
                             positions=positions,
                             transactions=transactions,
                             turnover_denom=turnover_denom,
                             bootstrap=bootstrap,
                             live_start_date=live_start_date,
                             header_rows=header_rows)

    plotting.show_worst_drawdown_periods(returns)

    vertical_sections = 11

    if live_start_date is not None:
        vertical_sections += 1
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)

    if benchmark_rets is not None:
        vertical_sections += 1

    if bootstrap:
        vertical_sections += 1

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_rolling_returns = plt.subplot(gs[:2, :])

    i = 2
    ax_rolling_returns_vol_match = plt.subplot(gs[i, :],
                                               sharex=ax_rolling_returns)
    i += 1
    ax_rolling_returns_log = plt.subplot(gs[i, :],
                                         sharex=ax_rolling_returns)
    i += 1
    ax_returns = plt.subplot(gs[i, :],
                             sharex=ax_rolling_returns)
    i += 1
    if benchmark_rets is not None:
        ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
        i += 1
    ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_monthly_heatmap = plt.subplot(gs[i, 0])
    ax_annual_returns = plt.subplot(gs[i, 1])
    ax_monthly_dist = plt.subplot(gs[i, 2])
    i += 1
    ax_return_quantiles = plt.subplot(gs[i, :])
    i += 1

    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        live_start_date=live_start_date,
        cone_std=cone_std,
        ax=ax_rolling_returns)
    ax_rolling_returns.set_title(
        'Cumulative returns')

    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        live_start_date=live_start_date,
        cone_std=None,
        volatility_match=(benchmark_rets is not None),
        legend_loc=None,
        ax=ax_rolling_returns_vol_match)
    ax_rolling_returns_vol_match.set_title(
        'Cumulative returns volatility matched to benchmark')

    plotting.plot_rolling_returns(
        returns,
        factor_returns=benchmark_rets,
        logy=True,
        live_start_date=live_start_date,
        cone_std=cone_std,
        ax=ax_rolling_returns_log)
    ax_rolling_returns_log.set_title(
        'Cumulative returns on logarithmic scale')

    plotting.plot_returns(
        returns,
        live_start_date=live_start_date,
        ax=ax_returns,
    )
    ax_returns.set_title(
        'Returns')

    if benchmark_rets is not None:
        plotting.plot_rolling_beta(
            returns, benchmark_rets, ax=ax_rolling_beta)

    plotting.plot_rolling_volatility(
        returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)

    plotting.plot_rolling_sharpe(
        returns, ax=ax_rolling_sharpe)

    # Drawdowns
    plotting.plot_drawdown_periods(
        returns, top=5, ax=ax_drawdown)

    plotting.plot_drawdown_underwater(
        returns=returns, ax=ax_underwater)

    plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)
    plotting.plot_annual_returns(returns, ax=ax_annual_returns)
    plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)

    plotting.plot_return_quantiles(
        returns,
        live_start_date=live_start_date,
        ax=ax_return_quantiles)

    if bootstrap and (benchmark_rets is not None):
        ax_bootstrap = plt.subplot(gs[i, :])
        plotting.plot_perf_stats(returns, benchmark_rets,
                                 ax=ax_bootstrap)
    elif bootstrap:
        raise ValueError('bootstrap requires passing of benchmark_rets.')

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)

    if return_fig:
        return fig
def create_position_tear_sheet(returns, positions,
                               show_and_plot_top_pos=2, hide_positions=False,
                               return_fig=False, sector_mappings=None,
                               transactions=None, estimate_intraday='infer'):
    """
    Generate a number of plots for analyzing a
    strategy's positions and holdings.

    - Plots: gross leverage, exposures, top positions, and holdings.
    - Will also print the top positions held.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    show_and_plot_top_pos : int, optional
        By default, this is 2, and both prints and plots the
        top 10 positions.
        If this is 0, it will only plot; if 1, it will only print.
    hide_positions : bool, optional
        If True, will not output any symbol names.
        Overrides show_and_plot_top_pos to 0 to suppress text output.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    transactions : pd.DataFrame, optional
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.
    estimate_intraday : boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    """
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    if hide_positions:
        show_and_plot_top_pos = 0
    vertical_sections = 7 if sector_mappings is not None else 6

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_exposures = plt.subplot(gs[0, :])
    ax_top_positions = plt.subplot(gs[1, :], sharex=ax_exposures)
    ax_max_median_pos = plt.subplot(gs[2, :], sharex=ax_exposures)
    ax_holdings = plt.subplot(gs[3, :], sharex=ax_exposures)
    ax_long_short_holdings = plt.subplot(gs[4, :])
    ax_gross_leverage = plt.subplot(gs[5, :], sharex=ax_exposures)

    positions_alloc = pos.get_percent_alloc(positions)

    plotting.plot_exposures(returns, positions, ax=ax_exposures)

    plotting.show_and_plot_top_positions(
        returns,
        positions_alloc,
        show_and_plot=show_and_plot_top_pos,
        hide_positions=hide_positions,
        ax=ax_top_positions)

    plotting.plot_max_median_position_concentration(positions,
                                                    ax=ax_max_median_pos)

    plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)

    plotting.plot_long_short_holdings(returns, positions_alloc,
                                      ax=ax_long_short_holdings)

    plotting.plot_gross_leverage(returns, positions,
                                 ax=ax_gross_leverage)

    if sector_mappings is not None:
        sector_exposures = pos.get_sector_exposures(positions,
                                                    sector_mappings)
        if len(sector_exposures.columns) > 1:
            sector_alloc = pos.get_percent_alloc(sector_exposures)
            sector_alloc = sector_alloc.drop('cash', axis='columns')
            ax_sector_alloc = plt.subplot(gs[6, :], sharex=ax_exposures)
            plotting.plot_sector_allocations(returns, sector_alloc,
                                             ax=ax_sector_alloc)

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)

    if return_fig:
        return fig
def create_txn_tear_sheet(returns, positions, transactions,
                          unadjusted_returns=None, estimate_intraday='infer',
                          return_fig=False):
    """
    Generate a number of plots for analyzing a strategy's transactions.

    Plots: turnover, daily volume, and a histogram of daily volume.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.
    unadjusted_returns : pd.Series, optional
        Daily unadjusted returns of the strategy, noncumulative.
        Will plot additional slippage sweep analysis.
        - See pyfolio.plotting.plot_slippage_sweep and
          pyfolio.plotting.plot_slippage_sensitivity
    estimate_intraday : boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    vertical_sections = 6 if unadjusted_returns is not None else 4

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_turnover = plt.subplot(gs[0, :])
    ax_daily_volume = plt.subplot(gs[1, :], sharex=ax_turnover)
    ax_turnover_hist = plt.subplot(gs[2, :])
    ax_txn_timings = plt.subplot(gs[3, :])

    plotting.plot_turnover(
        returns,
        transactions,
        positions,
        ax=ax_turnover)

    plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)

    try:
        plotting.plot_daily_turnover_hist(transactions, positions,
                                          ax=ax_turnover_hist)
    except ValueError:
        warnings.warn('Unable to generate turnover plot.', UserWarning)

    plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)

    if unadjusted_returns is not None:
        ax_slippage_sweep = plt.subplot(gs[4, :])
        plotting.plot_slippage_sweep(unadjusted_returns,
                                     positions,
                                     transactions,
                                     ax=ax_slippage_sweep
                                     )
        ax_slippage_sensitivity = plt.subplot(gs[5, :])
        plotting.plot_slippage_sensitivity(unadjusted_returns,
                                           positions,
                                           transactions,
                                           ax=ax_slippage_sensitivity
                                           )

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)

    if return_fig:
        return fig
def create_round_trip_tear_sheet(returns, positions, transactions,
                                 sector_mappings=None,
                                 estimate_intraday='infer', return_fig=False):
    """
    Generate a number of figures and plots describing the duration,
    frequency, and profitability of trade "round trips."
    A round trip is started when a new long or short position is
    opened and is only completed when the number of shares in that
    position returns to or crosses zero.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    estimate_intraday : boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    transactions_closed = round_trips.add_closing_transactions(positions,
                                                               transactions)
    # extract_round_trips requires BoD portfolio_value
    trades = round_trips.extract_round_trips(
        transactions_closed,
        portfolio_value=positions.sum(axis='columns') / (1 + returns)
    )

    if len(trades) < 5:
        warnings.warn(
            """Fewer than 5 round-trip trades made.
               Skipping round trip tearsheet.""", UserWarning)
        return

    round_trips.print_round_trip_stats(trades)

    plotting.show_profit_attribution(trades)

    if sector_mappings is not None:
        sector_trades = round_trips.apply_sector_mappings_to_round_trips(
            trades, sector_mappings)
        plotting.show_profit_attribution(sector_trades)

    fig = plt.figure(figsize=(14, 3 * 6))

    gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)

    ax_trade_lifetimes = plt.subplot(gs[0, :])
    ax_prob_profit_trade = plt.subplot(gs[1, 0])
    ax_holding_time = plt.subplot(gs[1, 1])
    ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])
    ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])

    plotting.plot_round_trip_lifetimes(trades, ax=ax_trade_lifetimes)

    plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)

    trade_holding_times = [x.days for x in trades['duration']]
    sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)
    ax_holding_time.set(xlabel='Holding time in days')

    sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)
    ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')

    sns.distplot(trades.returns.dropna() * 100, kde=False,
                 ax=ax_pnl_per_round_trip_pct)
    ax_pnl_per_round_trip_pct.set(
        xlabel='Round-trip returns in %')

    gs.tight_layout(fig)

    if return_fig:
        return fig
def create_interesting_times_tear_sheet(
        returns, benchmark_rets=None, legend_loc='best', return_fig=False):
    """
    Generate a number of returns plots around interesting points in time,
    like the flash crash and 9/11.

    Plots: returns around the dotcom bubble burst, Lehman Brothers' failure,
    9/11, US downgrade and EU debt crisis, Fukushima meltdown, US housing
    bubble burst, EZB IR, Great Recession (August 2007, March and September
    of 2008, Q1 & Q2 2009), flash crash, April and October 2014.

    benchmark_rets must be passed, as it is meaningless to analyze performance
    during interesting times without some benchmark to refer to.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    benchmark_rets : pd.Series
        Daily noncumulative returns of the benchmark.
        - This is in the same style as returns.
    legend_loc : plt.legend_loc, optional
        The legend's location.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    rets_interesting = timeseries.extract_interesting_date_ranges(returns)

    if not rets_interesting:
        warnings.warn('Passed returns do not overlap with any '
                      'interesting times.', UserWarning)
        return

    utils.print_table(pd.DataFrame(rets_interesting)
                      .describe().transpose()
                      .loc[:, ['mean', 'min', 'max']] * 100,
                      name='Stress Events',
                      float_format='{0:.2f}%'.format)

    if benchmark_rets is not None:
        returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)

        bmark_interesting = timeseries.extract_interesting_date_ranges(
            benchmark_rets)

    num_plots = len(rets_interesting)
    # 2 plots, 1 row; 3 plots, 2 rows; 4 plots, 2 rows; etc.
    num_rows = int((num_plots + 1) / 2.0)

    fig = plt.figure(figsize=(14, num_rows * 6.0))
    gs = gridspec.GridSpec(num_rows, 2, wspace=0.5, hspace=0.5)

    for i, (name, rets_period) in enumerate(rets_interesting.items()):
        # i=0 -> 0, i=1 -> 0, i=2 -> 1 ;; i=0 -> 0, i=1 -> 1, i=2 -> 0
        ax = plt.subplot(gs[int(i / 2.0), i % 2])

        ep.cum_returns(rets_period).plot(
            ax=ax, color='forestgreen', label='algo', alpha=0.7, lw=2)

        if benchmark_rets is not None:
            ep.cum_returns(bmark_interesting[name]).plot(
                ax=ax, color='gray', label='benchmark', alpha=0.6)
            ax.legend(['Algo', 'benchmark'], loc=legend_loc,
                      frameon=True, framealpha=0.5)
        else:
            ax.legend(['Algo'], loc=legend_loc,
                      frameon=True, framealpha=0.5)

        ax.set_title(name)
        ax.set_ylabel('Returns')
        ax.set_xlabel('')

    if return_fig:
        return fig
def create_capacity_tear_sheet(returns, positions, transactions,
                               market_data,
                               liquidation_daily_vol_limit=0.2,
                               trade_daily_vol_limit=0.05,
                               last_n_days=utils.APPROX_BDAYS_PER_MONTH * 6,
                               days_to_liquidate_limit=1,
                               estimate_intraday='infer'):
    """
    Generates a report detailing portfolio size constraints set by
    least liquid tickers. Plots a "capacity sweep," a curve describing
    projected Sharpe ratio given the slippage penalties that are
    applied at various capital bases.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.
    market_data : pd.Panel
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the
        passed positions DataFrame (same dates and symbols).
    liquidation_daily_vol_limit : float
        Max proportion of a daily bar that can be consumed in the
        process of liquidating a position in the
        "days to liquidation" analysis.
    trade_daily_vol_limit : float
        Flag daily transaction totals that exceed this proportion of
        the daily bar.
    last_n_days : integer
        Compute max position allocation and dollar volume for only
        the last N days of the backtest.
    days_to_liquidate_limit : integer
        Display all tickers with greater max days to liquidation.
    estimate_intraday : boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    """

    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    print("Max days to liquidation is computed for each traded name "
          "assuming a {:.0f}% limit on daily bar consumption\n"
          "and trailing 5 day mean volume as the available bar volume.\n\n"
          "Tickers with >{} day liquidation time at a"
          " constant $1m capital base:"
          .format(liquidation_daily_vol_limit * 100,
                  days_to_liquidate_limit))

    max_days_by_ticker = capacity.get_max_days_to_liquidate_by_ticker(
        positions, market_data,
        max_bar_consumption=liquidation_daily_vol_limit,
        capital_base=1e6,
        mean_volume_window=5)
    max_days_by_ticker.index = (
        max_days_by_ticker.index.map(utils.format_asset))

    print("Whole backtest:")
    utils.print_table(
        max_days_by_ticker[max_days_by_ticker.days_to_liquidate >
                           days_to_liquidate_limit])

    max_days_by_ticker_lnd = capacity.get_max_days_to_liquidate_by_ticker(
        positions, market_data,
        max_bar_consumption=liquidation_daily_vol_limit,
        capital_base=1e6,
        mean_volume_window=5,
        last_n_days=last_n_days)
    max_days_by_ticker_lnd.index = (
        max_days_by_ticker_lnd.index.map(utils.format_asset))

    print("Last {} trading days:".format(last_n_days))
    utils.print_table(
        max_days_by_ticker_lnd[max_days_by_ticker_lnd.days_to_liquidate >
                               days_to_liquidate_limit])

    llt = capacity.get_low_liquidity_transactions(transactions, market_data)
    llt.index = llt.index.map(utils.format_asset)

    print('Tickers with daily transactions consuming >{}% of daily bar,\n'
          'over the whole backtest:'.format(trade_daily_vol_limit * 100))
    utils.print_table(
        llt[llt['max_pct_bar_consumed'] > trade_daily_vol_limit * 100])

    llt = capacity.get_low_liquidity_transactions(
        transactions, market_data, last_n_days=last_n_days)

    print("Last {} trading days:".format(last_n_days))
    utils.print_table(
        llt[llt['max_pct_bar_consumed'] > trade_daily_vol_limit * 100])

    bt_starting_capital = positions.iloc[0].sum() / (1 + returns.iloc[0])
    fig, ax_capacity_sweep = plt.subplots(figsize=(14, 6))
    plotting.plot_capacity_sweep(returns, transactions, market_data,
                                 bt_starting_capital,
                                 min_pv=100000,
                                 max_pv=300000000,
                                 step_size=1000000,
                                 ax=ax_capacity_sweep)
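A minimal usage sketch, not part of the source: it assumes `returns`, `positions`, `transactions`, and `market_data` were already produced elsewhere (e.g. from a zipline backtest plus a price/volume panel).

# Hedged usage sketch: the four input objects are assumed to exist already.
from pyfolio import tears

tears.create_capacity_tear_sheet(returns, positions, transactions,
                                 market_data,
                                 trade_daily_vol_limit=0.05,
                                 days_to_liquidate_limit=1)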
def create_bayesian_tear_sheet(returns, benchmark_rets=None, live_start_date=None, samples=2000, return_fig=False, stoch_vol=False, progressbar=True): """ Generate a number of Bayesian distributions and a Bayesian cone plot of returns. Plots: Sharpe distribution, annual volatility distribution, annual alpha distribution, beta distribution, predicted 1 and 5 day returns distributions, and a cumulative returns cone plot. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in create_full_tear_sheet. benchmark_rets : pd.Series, optional Daily noncumulative returns of the benchmark. - This is in the same style as returns. live_start_date : datetime, optional The point in time when the strategy began live trading, after its backtest period. samples : int, optional Number of posterior samples to draw. return_fig : boolean, optional If True, returns the figure that was plotted on. stoch_vol : boolean, optional If True, run and plot the stochastic volatility model progressbar : boolean, optional If True, show a progress bar """ if not have_bayesian: raise NotImplementedError( "Bayesian tear sheet requirements not found.\n" "Run 'pip install pyfolio[bayesian]' to install " "bayesian requirements." ) if live_start_date is None: raise NotImplementedError( 'Bayesian tear sheet requires setting of live_start_date' ) live_start_date = ep.utils.get_utc_timestamp(live_start_date) df_train = returns.loc[returns.index < live_start_date] df_test = returns.loc[returns.index >= live_start_date] # Run T model with missing data print("Running T model") previous_time = time() # track the total run time of the Bayesian tear sheet start_time = previous_time trace_t, ppc_t = bayesian.run_model('t', df_train, returns_test=df_test, samples=samples, ppc=True, progressbar=progressbar) previous_time = timer("T model", previous_time) # Compute BEST model print("\nRunning BEST model") trace_best = bayesian.run_model('best', df_train, returns_test=df_test, samples=samples, progressbar=progressbar) previous_time = timer("BEST model", previous_time) # Plot results fig = plt.figure(figsize=(14, 10 * 2)) gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3) axs = [] row = 0 # Plot Bayesian cone ax_cone = plt.subplot(gs[row, :]) bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone) previous_time = timer("plotting Bayesian cone", previous_time) # Plot BEST results row += 1 axs.append(plt.subplot(gs[row, 0])) axs.append(plt.subplot(gs[row, 1])) row += 1 axs.append(plt.subplot(gs[row, 0])) axs.append(plt.subplot(gs[row, 1])) row += 1 axs.append(plt.subplot(gs[row, 0])) axs.append(plt.subplot(gs[row, 1])) row += 1 # Effect size across two axs.append(plt.subplot(gs[row, :])) bayesian.plot_best(trace=trace_best, axs=axs) previous_time = timer("plotting BEST results", previous_time) # Compute Bayesian predictions row += 1 ax_ret_pred_day = plt.subplot(gs[row, 0]) ax_ret_pred_week = plt.subplot(gs[row, 1]) day_pred = ppc_t[:, 0] p5 = scipy.stats.scoreatpercentile(day_pred, 5) sns.distplot(day_pred, ax=ax_ret_pred_day ) ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.) 
ax_ret_pred_day.set_xlabel('Predicted returns 1 day') ax_ret_pred_day.set_ylabel('Frequency') ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5, verticalalignment='bottom', horizontalalignment='right', transform=ax_ret_pred_day.transAxes) previous_time = timer("computing Bayesian predictions", previous_time) # Plot Bayesian VaRs week_pred = ( np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1] p5 = scipy.stats.scoreatpercentile(week_pred, 5) sns.distplot(week_pred, ax=ax_ret_pred_week ) ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.) ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days') ax_ret_pred_week.set_ylabel('Frequency') ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5, verticalalignment='bottom', horizontalalignment='right', transform=ax_ret_pred_week.transAxes) previous_time = timer("plotting Bayesian VaRs estimate", previous_time) # Run alpha beta model if benchmark_rets is not None: print("\nRunning alpha beta model") benchmark_rets = benchmark_rets.loc[df_train.index] trace_alpha_beta = bayesian.run_model('alpha_beta', df_train, bmark=benchmark_rets, samples=samples, progressbar=progressbar) previous_time = timer("running alpha beta model", previous_time) # Plot alpha and beta row += 1 ax_alpha = plt.subplot(gs[row, 0]) ax_beta = plt.subplot(gs[row, 1]) sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1, ax=ax_alpha) sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta) ax_alpha.set_xlabel('Annual Alpha') ax_alpha.set_ylabel('Belief') ax_beta.set_xlabel('Beta') ax_beta.set_ylabel('Belief') previous_time = timer("plotting alpha beta model", previous_time) if stoch_vol: # run stochastic volatility model returns_cutoff = 400 print( "\nRunning stochastic volatility model on " "most recent {} days of returns.".format(returns_cutoff) ) if df_train.size > returns_cutoff: df_train_truncated = df_train[-returns_cutoff:] _, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated) previous_time = timer( "running stochastic volatility model", previous_time) # plot latent volatility row += 1 ax_volatility = plt.subplot(gs[row, :]) bayesian.plot_stoch_vol( df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility) previous_time = timer( "plotting stochastic volatility model", previous_time) total_time = time() - start_time print("\nTotal runtime was {:.2f} seconds.".format(total_time)) gs.tight_layout(fig) if return_fig: return fig
def create_risk_tear_sheet(positions, style_factor_panel=None, sectors=None, caps=None, shares_held=None, volumes=None, percentile=None, returns=None, transactions=None, estimate_intraday='infer', return_fig=False): ''' Creates risk tear sheet: computes and plots style factor exposures, sector exposures, market cap exposures and volume exposures. Parameters ---------- positions : pd.DataFrame Daily equity positions of algorithm, in dollars. - DataFrame with dates as index, equities as columns - Last column is cash held - Example: Equity(24 Equity(62 [AAPL]) [ABT]) cash 2017-04-03 -108062.40 4401.540 2.247757e+07 2017-04-04 -108852.00 4373.820 2.540999e+07 2017-04-05 -119968.66 4336.200 2.839812e+07 style_factor_panel : pd.Panel Panel where each item is a DataFrame that tabulates style factor per equity per day. - Each item has dates as index, equities as columns - Example item: Equity(24 Equity(62 [AAPL]) [ABT]) 2017-04-03 -0.51284 1.39173 2017-04-04 -0.73381 0.98149 2017-04-05 -0.90132 1.13981 sectors : pd.DataFrame Daily Morningstar sector code per asset - DataFrame with dates as index and equities as columns - Example: Equity(24 Equity(62 [AAPL]) [ABT]) 2017-04-03 311.0 206.0 2017-04-04 311.0 206.0 2017-04-05 311.0 206.0 caps : pd.DataFrame Daily market cap per asset - DataFrame with dates as index and equities as columns - Example: Equity(24 Equity(62 [AAPL]) [ABT]) 2017-04-03 1.327160e+10 6.402460e+10 2017-04-04 1.329620e+10 6.403694e+10 2017-04-05 1.297464e+10 6.397187e+10 shares_held : pd.DataFrame Daily number of shares held by an algorithm. - Example: Equity(24 Equity(62 [AAPL]) [ABT]) 2017-04-03 1915 -2595 2017-04-04 1968 -3272 2017-04-05 2104 -3917 volumes : pd.DataFrame Daily volume per asset - DataFrame with dates as index and equities as columns - Example: Equity(24 Equity(62 [AAPL]) [ABT]) 2017-04-03 34940859.00 4665573.80 2017-04-04 35603329.10 4818463.90 2017-04-05 41846731.75 4129153.10 percentile : float Percentile to use when computing and plotting volume exposures. 
- Defaults to 10th percentile ''' positions = utils.check_intraday(estimate_intraday, returns, positions, transactions) idx = positions.index & style_factor_panel.iloc[0].index & sectors.index \ & caps.index & shares_held.index & volumes.index positions = positions.loc[idx] vertical_sections = 0 if style_factor_panel is not None: vertical_sections += len(style_factor_panel.items) new_style_dict = {} for item in style_factor_panel.items: new_style_dict.update({item: style_factor_panel.loc[item].loc[idx]}) style_factor_panel = pd.Panel() style_factor_panel = style_factor_panel.from_dict(new_style_dict) if sectors is not None: vertical_sections += 4 sectors = sectors.loc[idx] if caps is not None: vertical_sections += 4 caps = caps.loc[idx] if (shares_held is not None) & (volumes is not None) \ & (percentile is not None): vertical_sections += 3 shares_held = shares_held.loc[idx] volumes = volumes.loc[idx] if percentile is None: percentile = 0.1 fig = plt.figure(figsize=[14, vertical_sections * 6]) gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5) if style_factor_panel is not None: style_axes = [] style_axes.append(plt.subplot(gs[0, :])) for i in range(1, len(style_factor_panel.items)): style_axes.append(plt.subplot(gs[i, :], sharex=style_axes[0])) j = 0 for name, df in style_factor_panel.iteritems(): sfe = risk.compute_style_factor_exposures(positions, df) risk.plot_style_factor_exposures(sfe, name, style_axes[j]) j += 1 if sectors is not None: i += 1 ax_sector_longshort = plt.subplot(gs[i:i+2, :], sharex=style_axes[0]) i += 2 ax_sector_gross = plt.subplot(gs[i, :], sharex=style_axes[0]) i += 1 ax_sector_net = plt.subplot(gs[i, :], sharex=style_axes[0]) long_exposures, short_exposures, gross_exposures, net_exposures \ = risk.compute_sector_exposures(positions, sectors) risk.plot_sector_exposures_longshort(long_exposures, short_exposures, ax=ax_sector_longshort) risk.plot_sector_exposures_gross(gross_exposures, ax=ax_sector_gross) risk.plot_sector_exposures_net(net_exposures, ax=ax_sector_net) if caps is not None: i += 1 ax_cap_longshort = plt.subplot(gs[i:i+2, :], sharex=style_axes[0]) i += 2 ax_cap_gross = plt.subplot(gs[i, :], sharex=style_axes[0]) i += 1 ax_cap_net = plt.subplot(gs[i, :], sharex=style_axes[0]) long_exposures, short_exposures, gross_exposures, net_exposures \ = risk.compute_cap_exposures(positions, caps) risk.plot_cap_exposures_longshort(long_exposures, short_exposures, ax_cap_longshort) risk.plot_cap_exposures_gross(gross_exposures, ax_cap_gross) risk.plot_cap_exposures_net(net_exposures, ax_cap_net) if volumes is not None: i += 1 ax_vol_longshort = plt.subplot(gs[i:i+2, :], sharex=style_axes[0]) i += 2 ax_vol_gross = plt.subplot(gs[i, :], sharex=style_axes[0]) longed_threshold, shorted_threshold, grossed_threshold \ = risk.compute_volume_exposures(positions, volumes, percentile) risk.plot_volume_exposures_longshort(longed_threshold, shorted_threshold, percentile, ax_vol_longshort) risk.plot_volume_exposures_gross(grossed_threshold, percentile, ax_vol_gross) for ax in fig.axes: plt.setp(ax.get_xticklabels(), visible=True) if return_fig: return fig
def create_perf_attrib_tear_sheet(returns,
                                  positions,
                                  factor_returns,
                                  factor_loadings,
                                  transactions=None,
                                  pos_in_dollars=True,
                                  return_fig=False,
                                  factor_partitions=FACTOR_PARTITIONS):
    """
    Generate plots and tables for analyzing a strategy's performance.

    Parameters
    ----------
    returns : pd.Series
        Returns for each day in the date range.

    positions : pd.DataFrame
        Daily holdings (in dollars or percentages), indexed by date.
        Will be converted to percentages if positions are in dollars.
        Short positions show up as cash in the 'cash' column.

    factor_returns : pd.DataFrame
        Returns by factor, with date as index and factors as columns.

    factor_loadings : pd.DataFrame
        Factor loadings for all days in the date range, with date
        and ticker as index, and factors as columns.

    transactions : pd.DataFrame, optional
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.
        - Default is None.

    pos_in_dollars : boolean, optional
        Flag indicating whether `positions` are in dollars or percentages.
        If True, positions are in dollars.

    return_fig : boolean, optional
        If True, returns the figure that was plotted on.

    factor_partitions : dict
        dict specifying how factors should be separated in factor returns
        and risk exposures plots
        - Example:
          {'style': ['momentum', 'size', 'value', ...],
           'sector': ['technology', 'materials', ... ]}
    """
    portfolio_exposures, perf_attrib_data = perf_attrib.perf_attrib(
        returns, positions, factor_returns, factor_loadings, transactions,
        pos_in_dollars=pos_in_dollars
    )

    display(Markdown("## Performance Relative to Common Risk Factors"))

    # aggregate perf attrib stats and show summary table
    perf_attrib.show_perf_attrib_stats(returns, positions, factor_returns,
                                       factor_loadings, transactions,
                                       pos_in_dollars)

    # one section for the returns plot, and for each factor grouping
    # one section for factor returns, and one for risk exposures;
    # guard against factor_partitions being None
    vertical_sections = 1 + 2 * max(len(factor_partitions or {}), 1)
    current_section = 0

    fig = plt.figure(figsize=[14, vertical_sections * 6])
    gs = gridspec.GridSpec(vertical_sections, 1, wspace=0.5, hspace=0.5)

    perf_attrib.plot_returns(perf_attrib_data,
                             ax=plt.subplot(gs[current_section]))
    current_section += 1

    if factor_partitions is not None:

        for factor_type, partitions in factor_partitions.items():

            columns_to_select = perf_attrib_data.columns.intersection(
                partitions
            )

            perf_attrib.plot_factor_contribution_to_perf(
                perf_attrib_data[columns_to_select],
                ax=plt.subplot(gs[current_section]),
                title=(
                    'Cumulative common {} returns attribution'
                ).format(factor_type)
            )
            current_section += 1

        for factor_type, partitions in factor_partitions.items():

            perf_attrib.plot_risk_exposures(
                portfolio_exposures[portfolio_exposures.columns
                                    .intersection(partitions)],
                ax=plt.subplot(gs[current_section]),
                title='Daily {} factor exposures'.format(factor_type)
            )
            current_section += 1

    else:

        perf_attrib.plot_factor_contribution_to_perf(
            perf_attrib_data,
            ax=plt.subplot(gs[current_section])
        )
        current_section += 1

        perf_attrib.plot_risk_exposures(
            portfolio_exposures,
            ax=plt.subplot(gs[current_section])
        )

    gs.tight_layout(fig)

    if return_fig:
        return fig
def daily_txns_with_bar_data(transactions, market_data): """ Sums the absolute value of shares traded in each name on each day. Adds columns containing the closing price and total daily volume for each day-ticker combination. Parameters ---------- transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in tears.create_full_tear_sheet market_data : pd.Panel Contains "volume" and "price" DataFrames for the tickers in the passed positions DataFrames Returns ------- txn_daily : pd.DataFrame Daily totals for transacted shares in each traded name. price and volume columns for close price and daily volume for the corresponding ticker, respectively. """ transactions.index.name = 'date' txn_daily = pd.DataFrame(transactions.assign( amount=abs(transactions.amount)).groupby( ['symbol', pd.TimeGrouper('D')]).sum()['amount']) txn_daily['price'] = market_data['price'].unstack() txn_daily['volume'] = market_data['volume'].unstack() txn_daily = txn_daily.reset_index().set_index('date') return txn_daily
def days_to_liquidate_positions(positions, market_data,
                                max_bar_consumption=0.2,
                                capital_base=1e6,
                                mean_volume_window=5):
    """
    Compute the number of days that would have been required
    to fully liquidate each position on each day, based on the
    trailing n day mean daily bar volume and a limit on the proportion
    of a daily bar that we are allowed to consume.

    This analysis uses portfolio allocations and a provided capital base
    rather than the dollar values in the positions DataFrame to remove the
    effect of compounding on days to liquidate. In other words, this function
    assumes that the net liquidation portfolio value will always remain
    constant at capital_base.

    Parameters
    ----------
    positions : pd.DataFrame
        Contains daily position values including cash
        - See full explanation in tears.create_full_tear_sheet
    market_data : pd.Panel
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the
        passed positions DataFrame (same dates and symbols).
    max_bar_consumption : float
        Max proportion of a daily bar that can be consumed in the
        process of liquidating a position.
    capital_base : integer
        Capital base multiplied by portfolio allocation to compute
        position value that needs liquidating.
    mean_volume_window : integer
        Trailing window to use in mean volume calculation.

    Returns
    -------
    days_to_liquidate : pd.DataFrame
        Number of days required to fully liquidate daily positions.
        Datetime index, symbols as columns.
    """

    DV = market_data['volume'] * market_data['price']
    roll_mean_dv = DV.rolling(window=mean_volume_window,
                              center=False).mean().shift()
    roll_mean_dv = roll_mean_dv.replace(0, np.nan)

    positions_alloc = pos.get_percent_alloc(positions)
    positions_alloc = positions_alloc.drop('cash', axis=1)

    days_to_liquidate = (positions_alloc * capital_base) / \
        (max_bar_consumption * roll_mean_dv)

    return days_to_liquidate.iloc[mean_volume_window:]
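A worked instance of the ratio above, with illustrative numbers only (no market data required):

# A 10% allocation at a $1m capital base is a $100k position. With a 20%
# bar-consumption limit and a $2.5m trailing mean daily dollar volume:
position_value = 0.10 * 1e6                      # $100,000
consumable_per_day = 0.2 * 2.5e6                 # $500,000 per day
days_to_liquidate = position_value / consumable_per_day
print(days_to_liquidate)                         # 0.2 days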
def get_max_days_to_liquidate_by_ticker(positions, market_data,
                                        max_bar_consumption=0.2,
                                        capital_base=1e6,
                                        mean_volume_window=5,
                                        last_n_days=None):
    """
    Finds the longest estimated liquidation time for each traded
    name over the course of the backtest (or the last n days of
    the backtest).

    Parameters
    ----------
    positions : pd.DataFrame
        Contains daily position values including cash
        - See full explanation in tears.create_full_tear_sheet
    market_data : pd.Panel
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the
        passed positions DataFrame (same dates and symbols).
    max_bar_consumption : float
        Max proportion of a daily bar that can be consumed in the
        process of liquidating a position.
    capital_base : integer
        Capital base multiplied by portfolio allocation to compute
        position value that needs liquidating.
    mean_volume_window : integer
        Trailing window to use in mean volume calculation.
    last_n_days : integer
        Compute for only the last n days of the passed backtest data.

    Returns
    -------
    worst_liq : pd.DataFrame
        Max number of days required to fully liquidate each traded name.
        Index of symbols. Columns for days_to_liquidate and the
        corresponding date and position_alloc on that day.
    """

    dtlp = days_to_liquidate_positions(positions, market_data,
                                       max_bar_consumption=max_bar_consumption,
                                       capital_base=capital_base,
                                       mean_volume_window=mean_volume_window)

    if last_n_days is not None:
        dtlp = dtlp.loc[dtlp.index.max() - pd.Timedelta(days=last_n_days):]

    pos_alloc = pos.get_percent_alloc(positions)
    pos_alloc = pos_alloc.drop('cash', axis=1)

    liq_desc = pd.DataFrame()
    liq_desc['days_to_liquidate'] = dtlp.unstack()
    liq_desc['pos_alloc_pct'] = pos_alloc.unstack() * 100
    liq_desc.index.levels[0].name = 'symbol'
    liq_desc.index.levels[1].name = 'date'

    worst_liq = liq_desc.reset_index().sort_values(
        'days_to_liquidate', ascending=False).groupby('symbol').first()

    return worst_liq
def get_low_liquidity_transactions(transactions, market_data,
                                   last_n_days=None):
    """
    For each traded name, find the daily transaction total that consumed
    the greatest proportion of available daily bar volume.

    Parameters
    ----------
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.
    market_data : pd.Panel
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the
        passed positions DataFrame (same dates and symbols).
    last_n_days : integer
        Compute for only the last n days of the passed backtest data.

    Returns
    -------
    max_bar_consumption : pd.DataFrame
        Date and maximum percentage of the daily bar consumed,
        indexed by traded name.
    """

    txn_daily_w_bar = daily_txns_with_bar_data(transactions, market_data)
    txn_daily_w_bar.index.name = 'date'
    txn_daily_w_bar = txn_daily_w_bar.reset_index()

    if last_n_days is not None:
        md = txn_daily_w_bar.date.max() - pd.Timedelta(days=last_n_days)
        txn_daily_w_bar = txn_daily_w_bar[txn_daily_w_bar.date > md]

    bar_consumption = txn_daily_w_bar.assign(
        max_pct_bar_consumed=(
            txn_daily_w_bar.amount / txn_daily_w_bar.volume) * 100
    ).sort_values('max_pct_bar_consumed', ascending=False)
    max_bar_consumption = bar_consumption.groupby('symbol').first()

    return max_bar_consumption[['date', 'max_pct_bar_consumed']]
def apply_slippage_penalty(returns, txn_daily, simulate_starting_capital,
                           backtest_starting_capital, impact=0.1):
    """
    Applies a quadratic volume-share slippage model to daily returns based
    on the proportion of the observed historical daily bar dollar volume
    consumed by the strategy's trades. Scales the size of trades based
    on the ratio of the starting capital we wish to test to the starting
    capital of the passed backtest data.

    Parameters
    ----------
    returns : pd.Series
        Time series of daily returns.
    txn_daily : pd.Series
        Daily transaction totals, closing price, and daily volume for
        each traded name. See price_volume_daily_txns for more details.
    simulate_starting_capital : integer
        Capital base at which we want to test.
    backtest_starting_capital : integer
        Capital base at which the backtest was originally run.
    impact : float
        Scales the size of the slippage penalty. See the Zipline
        volume-share slippage model.

    Returns
    -------
    adj_returns : pd.Series
        Slippage penalty adjusted daily returns.
    """

    mult = simulate_starting_capital / backtest_starting_capital
    simulate_traded_shares = abs(mult * txn_daily.amount)
    simulate_traded_dollars = txn_daily.price * simulate_traded_shares
    simulate_pct_volume_used = simulate_traded_shares / txn_daily.volume

    penalties = simulate_pct_volume_used**2 \
        * impact * simulate_traded_dollars

    daily_penalty = penalties.resample('D').sum()
    daily_penalty = daily_penalty.reindex(returns.index).fillna(0)

    # Since we are scaling the numerator of the penalties linearly
    # by capital base, it makes the most sense to scale the denominator
    # similarly. In other words, since we aren't applying compounding to
    # simulate_traded_shares, we shouldn't apply compounding to pv.
    portfolio_value = ep.cum_returns(
        returns, starting_value=backtest_starting_capital) * mult

    adj_returns = returns - (daily_penalty / portfolio_value)

    return adj_returns
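A worked instance of the quadratic penalty, with purely illustrative numbers:

# Testing a $10m capital base against a backtest originally run at $1m
# scales every trade by 10x before computing the penalty.
mult = 10e6 / 1e6                                 # 10x
traded_shares = mult * 5000                       # 50,000 shares
pct_volume_used = traded_shares / 1e6             # 5% of a 1m-share bar
traded_dollars = traded_shares * 20.0             # $1,000,000 at $20/share
penalty = pct_volume_used ** 2 * 0.1 * traded_dollars
print(penalty)                                    # $250 for the day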
def map_transaction(txn):
    """
    Maps a single transaction row to a dictionary.

    Parameters
    ----------
    txn : dict or pd.Series
        A single transaction object to convert to a dictionary.

    Returns
    -------
    dict
        Mapped transaction.
    """

    if isinstance(txn['sid'], dict):
        sid = txn['sid']['sid']
        symbol = txn['sid']['symbol']
    else:
        # If the sid is not a zipline asset dict, fall back to using
        # the raw sid as the symbol as well.
        sid = txn['sid']
        symbol = txn['sid']

    return {'sid': sid,
            'symbol': symbol,
            'price': txn['price'],
            'order_id': txn['order_id'],
            'amount': txn['amount'],
            'commission': txn['commission'],
            'dt': txn['dt']}
def make_transaction_frame(transactions):
    """
    Formats a transaction DataFrame.

    Parameters
    ----------
    transactions : pd.DataFrame
        Contains improperly formatted transactional data.

    Returns
    -------
    df : pd.DataFrame
        Daily transaction volume and dollar amount.
         - See full explanation in tears.create_full_tear_sheet.
    """

    transaction_list = []
    for dt in transactions.index:
        txns = transactions.loc[dt]
        if len(txns) == 0:
            continue

        for txn in txns:
            txn = map_transaction(txn)
            transaction_list.append(txn)
    df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['dt']))
    df['txn_dollars'] = -df['amount'] * df['price']

    df.index = list(map(pd.Timestamp, df.dt.values))
    return df
def get_txn_vol(transactions):
    """
    Extract daily transaction data from a set of transaction objects.

    Parameters
    ----------
    transactions : pd.DataFrame
        Time series containing one row per symbol (and potentially
        duplicate datetime indices) and columns for amount and price.

    Returns
    -------
    pd.DataFrame
        Daily transaction volume and number of shares.
         - See full explanation in tears.create_full_tear_sheet.
    """

    txn_norm = transactions.copy()
    txn_norm.index = txn_norm.index.normalize()
    amounts = txn_norm.amount.abs()
    prices = txn_norm.price
    values = amounts * prices
    daily_amounts = amounts.groupby(amounts.index).sum()
    daily_values = values.groupby(values.index).sum()
    daily_amounts.name = "txn_shares"
    daily_values.name = "txn_volume"
    return pd.concat([daily_values, daily_amounts], axis=1)
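A toy illustration (not from the source), assuming this module's context: two trades in one name on the same day collapse into a single daily row.

import pandas as pd

txns = pd.DataFrame(
    {'amount': [10, -5], 'price': [100.0, 101.0], 'symbol': ['AAPL', 'AAPL']},
    index=pd.to_datetime(['2017-01-03 10:00', '2017-01-03 14:00']))
print(get_txn_vol(txns))
# txn_volume = 10*100 + 5*101 = 1505.0, txn_shares = 15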
def adjust_returns_for_slippage(returns, positions, transactions, slippage_bps): """ Apply a slippage penalty for every dollar traded. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in create_full_tear_sheet. positions : pd.DataFrame Daily net position values. - See full explanation in create_full_tear_sheet. transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in create_full_tear_sheet. slippage_bps: int/float Basis points of slippage to apply. Returns ------- pd.Series Time series of daily returns, adjusted for slippage. """ slippage = 0.0001 * slippage_bps portfolio_value = positions.sum(axis=1) pnl = portfolio_value * returns traded_value = get_txn_vol(transactions).txn_volume slippage_dollars = traded_value * slippage adjusted_pnl = pnl.add(-slippage_dollars, fill_value=0) adjusted_returns = returns * adjusted_pnl / pnl return adjusted_returns
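A worked instance of the adjustment, with illustrative numbers:

# 10 bps of slippage on $200k traded against a $1m book that returned
# 1% ($10k of pnl) costs $200, reducing the day's return to 0.98%.
slippage = 0.0001 * 10                    # 10 bps
pnl = 1e6 * 0.01                          # $10,000
slippage_dollars = 200000 * slippage      # $200
adjusted_return = 0.01 * (pnl - slippage_dollars) / pnl
print(adjusted_return)                    # 0.0098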
def get_turnover(positions, transactions, denominator='AGB'):
    """
    Value of purchases and sales divided by either the actual gross book
    or the portfolio value for the time step.

    Parameters
    ----------
    positions : pd.DataFrame
        Contains daily position values including cash.
        - See full explanation in tears.create_full_tear_sheet
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet
    denominator : str, optional
        Either 'AGB' or 'portfolio_value', default AGB.
        - AGB (Actual gross book) is the gross market
        value (GMV) of the specific algo being analyzed.
        Swapping out an entire portfolio of stocks for
        another will yield 200% turnover, not 100%, since
        transactions are being made for both sides.
        - We use the average of the previous and the current
        end-of-period AGB to avoid singularities when trading
        only into or out of an entire book in one trading period.
        - portfolio_value is the total value of the algo's
        positions end-of-period, including cash.

    Returns
    -------
    turnover_rate : pd.Series
        Time series of portfolio turnover rates.
    """

    txn_vol = get_txn_vol(transactions)
    traded_value = txn_vol.txn_volume

    if denominator == 'AGB':
        # Actual gross book is the same thing as the algo's GMV
        # We want our denom to be avg(AGB previous, AGB current)
        AGB = positions.drop('cash', axis=1).abs().sum(axis=1)
        denom = AGB.rolling(2).mean()

        # The first value of the rolling mean is NaN, so we treat the
        # previous AGB as 0 on day 0, making the first denominator
        # AGB.iloc[0] / 2.
        denom.iloc[0] = AGB.iloc[0] / 2
    elif denominator == 'portfolio_value':
        denom = positions.sum(axis=1)
    else:
        raise ValueError(
            "Unexpected value for denominator '{}'. The "
            "denominator parameter must be either 'AGB'"
            " or 'portfolio_value'.".format(denominator)
        )

    denom.index = denom.index.normalize()
    turnover = traded_value.div(denom, axis='index')
    turnover = turnover.fillna(0)
    return turnover
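A worked instance of the AGB convention described in the docstring:

# Swapping a $1m book entirely into new names in one period trades $2m
# of value against an average gross book of $1m:
traded_value = 1e6 + 1e6                  # sells plus buys
avg_agb = (1e6 + 1e6) / 2                 # previous and current AGB
print(traded_value / avg_agb)             # 2.0, i.e. 200% turnover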
def _groupby_consecutive(txn, max_delta=pd.Timedelta('8h')):
    """Merge transactions of the same direction separated by less than
    max_delta time duration.

    Parameters
    ----------
    txn : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet
    max_delta : pandas.Timedelta (optional)
        Merge transactions in the same direction separated by less
        than max_delta time duration.

    Returns
    -------
    transactions : pd.DataFrame
    """
    def vwap(transaction):
        if transaction.amount.sum() == 0:
            warnings.warn('Zero transacted shares, setting vwap to nan.')
            return np.nan
        return (transaction.amount * transaction.price).sum() / \
            transaction.amount.sum()

    out = []
    for sym, t in txn.groupby('symbol'):
        t = t.sort_index()
        t.index.name = 'dt'
        t = t.reset_index()

        t['order_sign'] = t.amount > 0
        t['block_dir'] = (t.order_sign.shift(
            1) != t.order_sign).astype(int).cumsum()
        t['block_time'] = ((t.dt.sub(t.dt.shift(1))) >
                           max_delta).astype(int).cumsum()
        grouped_price = (t.groupby(('block_dir',
                                    'block_time'))
                          .apply(vwap))
        grouped_price.name = 'price'
        grouped_rest = t.groupby(('block_dir', 'block_time')).agg({
            'amount': 'sum',
            'symbol': 'first',
            'dt': 'first'})

        grouped = grouped_rest.join(grouped_price)

        out.append(grouped)

    out = pd.concat(out)
    out = out.set_index('dt')
    return out
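A toy illustration (not from the source), assuming the pandas versions pyfolio targets: two buys an hour apart merge into one block priced at the VWAP.

import pandas as pd

txn = pd.DataFrame(
    {'amount': [10, 20], 'price': [100.0, 103.0], 'symbol': ['AAPL', 'AAPL']},
    index=pd.to_datetime(['2017-01-03 10:00', '2017-01-03 11:00']))
merged = _groupby_consecutive(txn)
# One row remains: amount = 30, price = (10*100 + 20*103) / 30 = 102.0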
def extract_round_trips(transactions,
                        portfolio_value=None):
    """Group transactions into "round trips". First, transactions are
    grouped by day and directionality. Then, long and short
    transactions are matched to create round-trip round_trips for which
    PnL, duration and returns are computed. Crossings where a position
    changes from long to short and vice-versa are handled correctly.

    Under the hood, we reconstruct the individual shares in a
    portfolio over time and match round_trips in a FIFO-order.

    For example, the following transactions would constitute one round trip:
    index                  amount   price    symbol
    2004-01-09 12:18:01    10       50      'AAPL'
    2004-01-09 15:12:53    10       100     'AAPL'
    2004-01-13 14:41:23    -10      100     'AAPL'
    2004-01-13 15:23:34    -10      200     'AAPL'

    First, the two buys and the two sells will each be merged into a
    single transaction (computing the price via vwap). Then, during the
    portfolio reconstruction, the two resulting transactions will
    be matched and result in 1 round-trip trade with a PnL of
    (150 * 20) - (75 * 20) = 1500.

    Note, that round trips do not have to close out positions
    completely. For example, we could have removed the last
    transaction in the example above and still generated a round-trip
    over 10 shares with 10 shares left in the portfolio to be matched
    with a later transaction.

    Parameters
    ----------
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet
    portfolio_value : pd.Series (optional)
        Portfolio value (all net assets including cash) over time.
        Note that portfolio_value needs to be a beginning-of-day value,
        so either use .shift() or positions.sum(axis='columns') / (1+returns).

    Returns
    -------
    round_trips : pd.DataFrame
        DataFrame with one row per round trip. The returns column
        contains returns with respect to the portfolio value, while
        rt_returns are the returns with respect to the capital invested
        in that particular round trip.
""" transactions = _groupby_consecutive(transactions) roundtrips = [] for sym, trans_sym in transactions.groupby('symbol'): trans_sym = trans_sym.sort_index() price_stack = deque() dt_stack = deque() trans_sym['signed_price'] = trans_sym.price * \ np.sign(trans_sym.amount) trans_sym['abs_amount'] = trans_sym.amount.abs().astype(int) for dt, t in trans_sym.iterrows(): if t.price < 0: warnings.warn('Negative price detected, ignoring for' 'round-trip.') continue indiv_prices = [t.signed_price] * t.abs_amount if (len(price_stack) == 0) or \ (copysign(1, price_stack[-1]) == copysign(1, t.amount)): price_stack.extend(indiv_prices) dt_stack.extend([dt] * len(indiv_prices)) else: # Close round-trip pnl = 0 invested = 0 cur_open_dts = [] for price in indiv_prices: if len(price_stack) != 0 and \ (copysign(1, price_stack[-1]) != copysign(1, price)): # Retrieve first dt, stock-price pair from # stack prev_price = price_stack.popleft() prev_dt = dt_stack.popleft() pnl += -(price + prev_price) cur_open_dts.append(prev_dt) invested += abs(prev_price) else: # Push additional stock-prices onto stack price_stack.append(price) dt_stack.append(dt) roundtrips.append({'pnl': pnl, 'open_dt': cur_open_dts[0], 'close_dt': dt, 'long': price < 0, 'rt_returns': pnl / invested, 'symbol': sym, }) roundtrips = pd.DataFrame(roundtrips) roundtrips['duration'] = roundtrips['close_dt'].sub(roundtrips['open_dt']) if portfolio_value is not None: # Need to normalize so that we can join pv = pd.DataFrame(portfolio_value, columns=['portfolio_value'])\ .assign(date=portfolio_value.index) roundtrips['date'] = roundtrips.close_dt.apply(lambda x: x.replace(hour=0, minute=0, second=0)) tmp = roundtrips.join(pv, on='date', lsuffix='_') roundtrips['returns'] = tmp.pnl / tmp.portfolio_value roundtrips = roundtrips.drop('date', axis='columns') return roundtrips
def add_closing_transactions(positions, transactions):
    """
    Appends transactions that close out all positions at the end of
    the timespan covered by positions data. Utilizes pricing information
    in the positions DataFrame to determine closing price.

    Parameters
    ----------
    positions : pd.DataFrame
        The positions that the strategy takes over time.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet

    Returns
    -------
    closed_txns : pd.DataFrame
        Transactions with closing transactions appended.
    """

    closed_txns = transactions[['symbol', 'amount', 'price']]

    pos_at_end = positions.drop('cash', axis=1).iloc[-1]
    open_pos = pos_at_end.replace(0, np.nan).dropna()
    # Add closing transactions one second after the close to be sure
    # they don't conflict with other transactions executed at that time.
    end_dt = open_pos.name + pd.Timedelta(seconds=1)

    for sym, ending_val in open_pos.iteritems():
        txn_sym = transactions[transactions.symbol == sym]

        ending_amount = txn_sym.amount.sum()

        ending_price = ending_val / ending_amount
        closing_txn = {'symbol': sym,
                       'amount': -ending_amount,
                       'price': ending_price}

        closing_txn = pd.DataFrame(closing_txn, index=[end_dt])
        closed_txns = closed_txns.append(closing_txn)

    closed_txns = closed_txns[closed_txns.amount != 0]

    return closed_txns
def apply_sector_mappings_to_round_trips(round_trips, sector_mappings):
    """
    Translates round trip symbols to sectors.

    Parameters
    ----------
    round_trips : pd.DataFrame
        DataFrame with one row per round trip trade.
        - See full explanation in round_trips.extract_round_trips
    sector_mappings : dict or pd.Series
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.

    Returns
    -------
    sector_round_trips : pd.DataFrame
        Round trips with symbol names replaced by sector names.
    """

    sector_round_trips = round_trips.copy()
    sector_round_trips.symbol = sector_round_trips.symbol.apply(
        lambda x: sector_mappings.get(x, 'No Sector Mapping'))
    sector_round_trips = sector_round_trips.dropna(axis=0)

    return sector_round_trips
def gen_round_trip_stats(round_trips): """Generate various round-trip statistics. Parameters ---------- round_trips : pd.DataFrame DataFrame with one row per round trip trade. - See full explanation in round_trips.extract_round_trips Returns ------- stats : dict A dictionary where each value is a pandas DataFrame containing various round-trip statistics. See also -------- round_trips.print_round_trip_stats """ stats = {} stats['pnl'] = agg_all_long_short(round_trips, 'pnl', PNL_STATS) stats['summary'] = agg_all_long_short(round_trips, 'pnl', SUMMARY_STATS) stats['duration'] = agg_all_long_short(round_trips, 'duration', DURATION_STATS) stats['returns'] = agg_all_long_short(round_trips, 'returns', RETURN_STATS) stats['symbols'] = \ round_trips.groupby('symbol')['returns'].agg(RETURN_STATS).T return stats
def print_round_trip_stats(round_trips, hide_pos=False): """Print various round-trip statistics. Tries to pretty-print tables with HTML output if run inside IPython NB. Parameters ---------- round_trips : pd.DataFrame DataFrame with one row per round trip trade. - See full explanation in round_trips.extract_round_trips See also -------- round_trips.gen_round_trip_stats """ stats = gen_round_trip_stats(round_trips) print_table(stats['summary'], float_format='{:.2f}'.format, name='Summary stats') print_table(stats['pnl'], float_format='${:.2f}'.format, name='PnL stats') print_table(stats['duration'], float_format='{:.2f}'.format, name='Duration stats') print_table(stats['returns'] * 100, float_format='{:.2f}%'.format, name='Return stats') if not hide_pos: stats['symbols'].columns = stats['symbols'].columns.map(format_asset) print_table(stats['symbols'] * 100, float_format='{:.2f}%'.format, name='Symbol stats')
def perf_attrib(returns,
                positions,
                factor_returns,
                factor_loadings,
                transactions=None,
                pos_in_dollars=True):
    """
    Attributes the performance of a returns stream to a set of risk factors.

    Preprocesses inputs, and then calls empyrical.perf_attrib. See
    empyrical.perf_attrib for more info.

    Performance attribution determines how much each risk factor, e.g.,
    momentum, the technology sector, etc., contributed to total returns, as
    well as the daily exposure to each of the risk factors. The returns that
    can be attributed to one of the given risk factors are the
    `common_returns`, and the returns that _cannot_ be attributed to a risk
    factor are the `specific_returns`, or the alpha. The common_returns and
    specific_returns summed together will always equal the total returns.

    Parameters
    ----------
    returns : pd.Series
        Returns for each day in the date range.
        - Example:
            2017-01-01   -0.017098
            2017-01-02    0.002683
            2017-01-03   -0.008669

    positions : pd.DataFrame
        Daily holdings (in dollars or percentages), indexed by date.
        Will be converted to percentages if positions are in dollars.
        Short positions show up as cash in the 'cash' column.
        - Examples:
                        AAPL  TLT  XOM  cash
            2017-01-01    34   58   10     0
            2017-01-02    22   77   18     0
            2017-01-03   -15   27   30    15

                            AAPL      TLT       XOM  cash
            2017-01-01  0.333333  0.568627  0.098039   0.0
            2017-01-02  0.188034  0.658120  0.153846   0.0
            2017-01-03  0.208333  0.375000  0.416667   0.0

    factor_returns : pd.DataFrame
        Returns by factor, with date as index and factors as columns
        - Example:
                        momentum  reversal
            2017-01-01  0.002779 -0.005453
            2017-01-02  0.001096  0.010290

    factor_loadings : pd.DataFrame
        Factor loadings for all days in the date range, with date and ticker
        as index, and factors as columns.
        - Example:
                               momentum  reversal
            dt         ticker
            2017-01-01 AAPL   -1.592914  0.852830
                       TLT     0.184864  0.895534
                       XOM     0.993160  1.149353
            2017-01-02 AAPL   -0.140009 -0.524952
                       TLT    -1.066978  0.185435
                       XOM    -1.798401  0.761549

    transactions : pd.DataFrame, optional
        Executed trade volumes and fill prices. Used to check the turnover of
        the algorithm. Default is None, in which case the turnover check is
        skipped.
        - One row per trade.
        - Trades on different names that occur at the same time will have
          identical indices.
        - Example:
            index                  amount   price    symbol
            2004-01-09 12:18:01    483      324.12   'AAPL'
            2004-01-09 12:18:01    122      83.10    'MSFT'
            2004-01-13 14:12:23    -75      340.43   'AAPL'

    pos_in_dollars : bool
        Flag indicating whether `positions` are in dollars or percentages.
        If True, positions are in dollars.

    Returns
    -------
    tuple of (risk_exposures_portfolio, perf_attribution)

    risk_exposures_portfolio : pd.DataFrame
        df indexed by datetime, with factors as columns
        - Example:
                        momentum  reversal
            dt
            2017-01-01 -0.238655  0.077123
            2017-01-02  0.821872  1.520515

    perf_attribution : pd.DataFrame
        df with factors, common returns, and specific returns as columns,
        and datetimes as index
        - Example:
                        momentum  reversal  common_returns  specific_returns
            dt
            2017-01-01  0.249087  0.935925        1.185012          1.185012
            2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980
    """
    (returns,
     positions,
     factor_returns,
     factor_loadings) = _align_and_warn(returns,
                                        positions,
                                        factor_returns,
                                        factor_loadings,
                                        transactions=transactions,
                                        pos_in_dollars=pos_in_dollars)

    # Note that we convert positions to percentages *after* the checks
    # above, since get_turnover() expects positions in dollars.
    positions = _stack_positions(positions, pos_in_dollars=pos_in_dollars)

    return ep.perf_attrib(returns, positions, factor_returns, factor_loadings)
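A minimal usage sketch with toy inputs (one factor, two assets); this assumes the empyrical backend is installed and is not taken from the source.

import pandas as pd

dates = pd.date_range('2017-01-01', periods=2)
toy_returns = pd.Series([0.01, -0.02], index=dates)
toy_positions = pd.DataFrame({'AAPL': [50.0, 50.0], 'TLT': [30.0, 30.0],
                              'cash': [20.0, 20.0]}, index=dates)
toy_factor_returns = pd.DataFrame({'momentum': [0.002, -0.001]}, index=dates)
toy_loadings = pd.DataFrame(
    {'momentum': [1.2, -0.3, 1.1, -0.2]},
    index=pd.MultiIndex.from_product([dates, ['AAPL', 'TLT']],
                                     names=['dt', 'ticker']))
exposures, attribution = perf_attrib(toy_returns, toy_positions,
                                     toy_factor_returns, toy_loadings)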
def compute_exposures(positions, factor_loadings, stack_positions=True, pos_in_dollars=True): """ Compute daily risk factor exposures. Normalizes positions (if necessary) and calls ep.compute_exposures. See empyrical.compute_exposures for more info. Parameters ---------- positions: pd.DataFrame or pd.Series Daily holdings (in dollars or percentages), indexed by date, OR a series of holdings indexed by date and ticker. - Examples: AAPL TLT XOM cash 2017-01-01 34 58 10 0 2017-01-02 22 77 18 0 2017-01-03 -15 27 30 15 AAPL TLT XOM cash 2017-01-01 0.333333 0.568627 0.098039 0.0 2017-01-02 0.188034 0.658120 0.153846 0.0 2017-01-03 0.208333 0.375000 0.416667 0.0 dt ticker 2017-01-01 AAPL 0.417582 TLT 0.010989 XOM 0.571429 2017-01-02 AAPL 0.202381 TLT 0.535714 XOM 0.261905 factor_loadings : pd.DataFrame Factor loadings for all days in the date range, with date and ticker as index, and factors as columns. - Example: momentum reversal dt ticker 2017-01-01 AAPL -1.592914 0.852830 TLT 0.184864 0.895534 XOM 0.993160 1.149353 2017-01-02 AAPL -0.140009 -0.524952 TLT -1.066978 0.185435 XOM -1.798401 0.761549 stack_positions : bool Flag indicating whether `positions` should be converted to long format. pos_in_dollars : bool Flag indicating whether `positions` are in dollars or percentages If True, positions are in dollars. Returns ------- risk_exposures_portfolio : pd.DataFrame df indexed by datetime, with factors as columns. - Example: momentum reversal dt 2017-01-01 -0.238655 0.077123 2017-01-02 0.821872 1.520515 """ if stack_positions: positions = _stack_positions(positions, pos_in_dollars=pos_in_dollars) return ep.compute_exposures(positions, factor_loadings)
def create_perf_attrib_stats(perf_attrib, risk_exposures):
    """
    Takes performance attribution data over a period of time and computes
    a summary of annualized and cumulative specific, common, and total
    returns, the specific (multi-factor alpha) Sharpe ratio, and per-factor
    average exposures and returns.
    """
    summary = OrderedDict()
    total_returns = perf_attrib['total_returns']
    specific_returns = perf_attrib['specific_returns']
    common_returns = perf_attrib['common_returns']

    summary['Annualized Specific Return'] =\
        ep.annual_return(specific_returns)
    summary['Annualized Common Return'] =\
        ep.annual_return(common_returns)
    summary['Annualized Total Return'] =\
        ep.annual_return(total_returns)

    summary['Specific Sharpe Ratio'] =\
        ep.sharpe_ratio(specific_returns)

    summary['Cumulative Specific Return'] =\
        ep.cum_returns_final(specific_returns)
    summary['Cumulative Common Return'] =\
        ep.cum_returns_final(common_returns)
    summary['Total Returns'] =\
        ep.cum_returns_final(total_returns)

    summary = pd.Series(summary, name='')

    annualized_returns_by_factor = [ep.annual_return(perf_attrib[c])
                                    for c in risk_exposures.columns]
    cumulative_returns_by_factor = [ep.cum_returns_final(perf_attrib[c])
                                    for c in risk_exposures.columns]

    risk_exposure_summary = pd.DataFrame(
        data=OrderedDict([
            (
                'Average Risk Factor Exposure',
                risk_exposures.mean(axis='rows')
            ),
            ('Annualized Return', annualized_returns_by_factor),
            ('Cumulative Return', cumulative_returns_by_factor),
        ]),
        index=risk_exposures.columns,
    )

    return summary, risk_exposure_summary
def show_perf_attrib_stats(returns, positions, factor_returns, factor_loadings, transactions=None, pos_in_dollars=True): """ Calls `perf_attrib` using inputs, and displays outputs using `utils.print_table`. """ risk_exposures, perf_attrib_data = perf_attrib( returns, positions, factor_returns, factor_loadings, transactions, pos_in_dollars=pos_in_dollars, ) perf_attrib_stats, risk_exposure_stats =\ create_perf_attrib_stats(perf_attrib_data, risk_exposures) percentage_formatter = '{:.2%}'.format float_formatter = '{:.2f}'.format summary_stats = perf_attrib_stats.loc[['Annualized Specific Return', 'Annualized Common Return', 'Annualized Total Return', 'Specific Sharpe Ratio']] # Format return rows in summary stats table as percentages. for col_name in ( 'Annualized Specific Return', 'Annualized Common Return', 'Annualized Total Return', ): summary_stats[col_name] = percentage_formatter(summary_stats[col_name]) # Display sharpe to two decimal places. summary_stats['Specific Sharpe Ratio'] = float_formatter( summary_stats['Specific Sharpe Ratio'] ) print_table(summary_stats, name='Summary Statistics') print_table( risk_exposure_stats, name='Exposures Summary', # In exposures table, format exposure column to 2 decimal places, and # return columns as percentages. formatters={ 'Average Risk Factor Exposure': float_formatter, 'Annualized Return': percentage_formatter, 'Cumulative Return': percentage_formatter, }, )
def plot_returns(perf_attrib_data, cost=None, ax=None): """ Plot total, specific, and common returns. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index. Assumes the `total_returns` column is NOT cost adjusted. - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 cost : pd.Series, optional if present, gets subtracted from `perf_attrib_data['total_returns']`, and gets plotted separately ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() returns = perf_attrib_data['total_returns'] total_returns_label = 'Total returns' cumulative_returns_less_costs = _cumulative_returns_less_costs( returns, cost ) if cost is not None: total_returns_label += ' (adjusted)' specific_returns = perf_attrib_data['specific_returns'] common_returns = perf_attrib_data['common_returns'] ax.plot(cumulative_returns_less_costs, color='b', label=total_returns_label) ax.plot(ep.cum_returns(specific_returns), color='g', label='Cumulative specific returns') ax.plot(ep.cum_returns(common_returns), color='r', label='Cumulative common returns') if cost is not None: ax.plot(-ep.cum_returns(cost), color='k', label='Cumulative cost spent') ax.set_title('Time series of cumulative returns') ax.set_ylabel('Returns') configure_legend(ax) return ax
def plot_alpha_returns(alpha_returns, ax=None): """ Plot histogram of daily multi-factor alpha returns (specific returns). Parameters ---------- alpha_returns : pd.Series series of daily alpha returns indexed by datetime ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() ax.hist(alpha_returns, color='g', label='Multi-factor alpha') ax.set_title('Histogram of alphas') ax.axvline(0, color='k', linestyle='--', label='Zero') avg = alpha_returns.mean() ax.axvline(avg, color='b', label='Mean = {: 0.5f}'.format(avg)) configure_legend(ax) return ax
def plot_factor_contribution_to_perf( perf_attrib_data, ax=None, title='Cumulative common returns attribution', ): """ Plot each factor's contribution to performance. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used title : str, optional title of plot Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() factors_to_plot = perf_attrib_data.drop( ['total_returns', 'common_returns'], axis='columns', errors='ignore' ) factors_cumulative = pd.DataFrame() for factor in factors_to_plot: factors_cumulative[factor] = ep.cum_returns(factors_to_plot[factor]) for col in factors_cumulative: ax.plot(factors_cumulative[col]) ax.axhline(0, color='k') configure_legend(ax, change_colors=True) ax.set_ylabel('Cumulative returns by factor') ax.set_title(title) return ax
def plot_risk_exposures(exposures, ax=None, title='Daily risk factor exposures'): """ Parameters ---------- exposures : pd.DataFrame df indexed by datetime, with factors as columns - Example: momentum reversal dt 2017-01-01 -0.238655 0.077123 2017-01-02 0.821872 1.520515 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() for col in exposures: ax.plot(exposures[col]) configure_legend(ax, change_colors=True) ax.set_ylabel('Factor exposures') ax.set_title(title) return ax
def _align_and_warn(returns,
                    positions,
                    factor_returns,
                    factor_loadings,
                    transactions=None,
                    pos_in_dollars=True):
    """
    Make sure that all inputs have matching dates and tickers,
    and raise warnings if necessary.
    """
    missing_stocks = positions.columns.difference(
        factor_loadings.index.get_level_values(1).unique()
    )

    # cash will not be in factor_loadings
    num_stocks = len(positions.columns) - 1
    missing_stocks = missing_stocks.drop('cash')
    num_stocks_covered = num_stocks - len(missing_stocks)
    missing_ratio = round(len(missing_stocks) / num_stocks, ndigits=3)

    if num_stocks_covered == 0:
        raise ValueError("Could not perform performance attribution. "
                         "No factor loadings were available for this "
                         "algorithm's positions.")

    if len(missing_stocks) > 0:

        if len(missing_stocks) > 5:

            missing_stocks_displayed = (
                " {} assets were missing factor loadings, including: {}..{}"
            ).format(len(missing_stocks),
                     ', '.join(missing_stocks[:5].map(str)),
                     missing_stocks[-1])
            avg_allocation_msg = "selected missing assets"

        else:
            missing_stocks_displayed = (
                "The following assets were missing factor loadings: {}."
            ).format(list(missing_stocks))
            avg_allocation_msg = "missing assets"

        missing_stocks_warning_msg = (
            "Could not determine risk exposures for some of this algorithm's "
            "positions. Returns from the missing assets will not be properly "
            "accounted for in performance attribution.\n"
            "\n"
            "{}. "
            "Ignoring for exposure calculation and performance attribution. "
            "Ratio of assets missing: {}. Average allocation of {}:\n"
            "\n"
            "{}.\n"
        ).format(
            missing_stocks_displayed,
            missing_ratio,
            avg_allocation_msg,
            positions[missing_stocks[:5].union(missing_stocks[[-1]])].mean(),
        )

        warnings.warn(missing_stocks_warning_msg)

        positions = positions.drop(missing_stocks, axis='columns',
                                   errors='ignore')

    missing_factor_loadings_index = positions.index.difference(
        factor_loadings.index.get_level_values(0).unique()
    )

    if len(missing_factor_loadings_index) > 0:

        if len(missing_factor_loadings_index) > 5:
            missing_dates_displayed = (
                "(first missing is {}, last missing is {})"
            ).format(
                missing_factor_loadings_index[0],
                missing_factor_loadings_index[-1]
            )
        else:
            missing_dates_displayed = list(missing_factor_loadings_index)

        warning_msg = (
            "Could not find factor loadings for {} dates: {}. "
            "Truncating date range for performance attribution. "
        ).format(len(missing_factor_loadings_index), missing_dates_displayed)

        warnings.warn(warning_msg)

        positions = positions.drop(missing_factor_loadings_index,
                                   errors='ignore')
        returns = returns.drop(missing_factor_loadings_index,
                               errors='ignore')
        factor_returns = factor_returns.drop(missing_factor_loadings_index,
                                             errors='ignore')

    if transactions is not None and pos_in_dollars:
        turnover = get_turnover(positions, transactions).mean()
        if turnover > PERF_ATTRIB_TURNOVER_THRESHOLD:
            warning_msg = (
                "This algorithm has relatively high turnover of its "
                "positions. As a result, performance attribution might not be "
                "fully accurate.\n"
                "\n"
                "Performance attribution is calculated based "
                "on end-of-day holdings and does not account for intraday "
                "activity. Algorithms that derive a high percentage of "
                "returns from buying and selling within the same day may "
                "receive inaccurate performance attribution.\n"
            )
            warnings.warn(warning_msg)

    return (returns, positions, factor_returns, factor_loadings)
def _stack_positions(positions, pos_in_dollars=True): """ Convert positions to percentages if necessary, and change them to long format. Parameters ---------- positions: pd.DataFrame Daily holdings (in dollars or percentages), indexed by date. Will be converted to percentages if positions are in dollars. Short positions show up as cash in the 'cash' column. pos_in_dollars : bool Flag indicating whether `positions` are in dollars or percentages If True, positions are in dollars. """ if pos_in_dollars: # convert holdings to percentages positions = get_percent_alloc(positions) # remove cash after normalizing positions positions = positions.drop('cash', axis='columns') # convert positions to long format positions = positions.stack() positions.index = positions.index.set_names(['dt', 'ticker']) return positions
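A toy illustration (not from the source): one day of dollar holdings becomes a (dt, ticker)-indexed Series of percentage allocations, with cash removed.

import pandas as pd

wide = pd.DataFrame({'AAPL': [50.0], 'cash': [50.0]},
                    index=pd.to_datetime(['2017-01-01']))
print(_stack_positions(wide))
# (2017-01-01, 'AAPL') -> 0.5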
def _cumulative_returns_less_costs(returns, costs): """ Compute cumulative returns, less costs. """ if costs is None: return ep.cum_returns(returns) return ep.cum_returns(returns - costs)
def format_asset(asset): """ If zipline asset objects are used, we want to print them out prettily within the tear sheet. This function should only be applied directly before displaying. """ try: import zipline.assets except ImportError: return asset if isinstance(asset, zipline.assets.Asset): return asset.symbol else: return asset
def vectorize(func): """ Decorator so that functions can be written to work on Series but may still be called with DataFrames. """ def wrapper(df, *args, **kwargs): if df.ndim == 1: return func(df, *args, **kwargs) elif df.ndim == 2: return df.apply(func, *args, **kwargs) return wrapper
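A usage sketch (not from the source): a Series-only function gains column-wise DataFrame support via the decorator.

import pandas as pd

@vectorize
def demean(series):
    return series - series.mean()

df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 5.0, 6.0]})
demean(df['a'])   # operates on a single Series
demean(df)        # applied to each column of the DataFrame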
def extract_rets_pos_txn_from_zipline(backtest):
    """
    Extract returns, positions, and transactions from the backtest
    data structure returned by zipline.TradingAlgorithm.run().

    The returned data structures are in a format compatible with the
    rest of pyfolio and can be directly passed to
    e.g. tears.create_full_tear_sheet().

    Parameters
    ----------
    backtest : pd.DataFrame
        DataFrame returned by zipline.TradingAlgorithm.run()

    Returns
    -------
    returns : pd.Series
        Daily returns of strategy.
         - See full explanation in tears.create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in tears.create_full_tear_sheet.


    Example (on the Quantopian research platform)
    ---------------------------------------------
    >>> backtest = my_algo.run()
    >>> returns, positions, transactions = (
    ...     pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest))
    >>> pyfolio.tears.create_full_tear_sheet(returns,
    ...                                      positions, transactions)
    """

    backtest.index = backtest.index.normalize()
    if backtest.index.tzinfo is None:
        backtest.index = backtest.index.tz_localize('UTC')
    returns = backtest.returns
    raw_positions = []
    for dt, pos_row in backtest.positions.iteritems():
        df = pd.DataFrame(pos_row)
        df.index = [dt] * len(df)
        raw_positions.append(df)
    if not raw_positions:
        raise ValueError("The backtest does not have any positions.")
    positions = pd.concat(raw_positions)
    positions = pos.extract_pos(positions, backtest.ending_cash)
    transactions = txn.make_transaction_frame(backtest.transactions)
    if transactions.index.tzinfo is None:
        transactions.index = transactions.index.tz_localize('utc')

    return returns, positions, transactions
def print_table(table,
                name=None,
                float_format=None,
                formatters=None,
                header_rows=None):
    """
    Pretty print a pandas DataFrame.

    Uses HTML output if running inside Jupyter Notebook, otherwise
    formatted text output.

    Parameters
    ----------
    table : pandas.Series or pandas.DataFrame
        Table to pretty-print.
    name : str, optional
        Table name to display in upper left corner.
    float_format : function, optional
        Formatter to use for displaying table elements, passed as the
        `float_format` arg to pd.Dataframe.to_html.
        E.g. `'{0:.2%}'.format` for displaying the float 1.0 as '100.00%'.
    formatters : list or dict, optional
        Formatters to use by column, passed as the `formatters` arg to
        pd.Dataframe.to_html.
    header_rows : dict, optional
        Extra rows to display at the top of the table.
    """

    if isinstance(table, pd.Series):
        table = pd.DataFrame(table)

    if name is not None:
        table.columns.name = name

    html = table.to_html(float_format=float_format, formatters=formatters)

    if header_rows is not None:
        # Count the number of columns for the text to span
        n_cols = html.split('<thead>')[1].split('</thead>')[0].count('<th>')

        # Generate the HTML for the extra rows
        rows = ''
        for name, value in header_rows.items():
            rows += ('\n    <tr style="text-align: right;"><th>%s</th>' +
                     '<td colspan=%d>%s</td></tr>') % (name, n_cols, value)

        # Inject the new HTML
        html = html.replace('<thead>', '<thead>' + rows)

    display(HTML(html))
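A usage sketch (not from the source), assuming it runs inside a Jupyter notebook where HTML display is available:

import pandas as pd

stats = pd.DataFrame({'Sharpe': [1.2], 'Max drawdown': [-0.15]},
                     index=['Backtest'])
print_table(stats, name='Performance',
            float_format='{0:.2f}'.format,
            header_rows={'Start date': '2017-01-01'})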
def detect_intraday(positions, transactions, threshold=0.25): """ Attempt to detect an intraday strategy. Get the number of positions held at the end of the day, and divide that by the number of unique stocks transacted every day. If the average quotient is below a threshold, then an intraday strategy is detected. Parameters ---------- positions : pd.DataFrame Daily net position values. - See full explanation in create_full_tear_sheet. transactions : pd.DataFrame Prices and amounts of executed trades. One row per trade. - See full explanation in create_full_tear_sheet. Returns ------- boolean True if an intraday strategy is detected. """ daily_txn = transactions.copy() daily_txn.index = daily_txn.index.date txn_count = daily_txn.groupby(level=0).symbol.nunique().sum() daily_pos = positions.drop('cash', axis=1).replace(0, np.nan) return daily_pos.count(axis=1).sum() / txn_count < threshold
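A worked instance of the detection ratio, with illustrative numbers:

# A strategy transacting in 40 unique names per day while holding only
# 5 overnight scores 5 / 40 = 0.125 < 0.25 and is flagged as intraday.
held_overnight_per_day = 5
unique_names_traded_per_day = 40
print(held_overnight_per_day / unique_names_traded_per_day < 0.25)  # True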
def check_intraday(estimate, returns, positions, transactions):
    """
    Logic for checking if a strategy is intraday and processing it.

    Parameters
    ----------
    estimate : boolean or str, optional
        Approximate returns for intraday strategies.
        See description in tears.create_full_tear_sheet.
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.

    Returns
    -------
    pd.DataFrame
        Daily net position values, adjusted for intraday movement.
    """

    if estimate == 'infer':
        if positions is not None and transactions is not None:
            if detect_intraday(positions, transactions):
                warnings.warn('Detected intraday strategy; inferring '
                              'positions from transactions. Set '
                              'estimate_intraday=False to disable.')
                return estimate_intraday(returns, positions, transactions)
            else:
                return positions
        else:
            return positions

    elif estimate:
        if positions is not None and transactions is not None:
            return estimate_intraday(returns, positions, transactions)
        else:
            raise ValueError('Positions and txns needed to estimate intraday')
    else:
        return positions
def estimate_intraday(returns, positions, transactions, EOD_hour=23):
    """
    Intraday strategies will often not hold positions at the day end.
    This attempts to find the point in the day that best represents
    the activity of the strategy on that day, and effectively resamples
    the end-of-day positions with the positions at this point of day.
    The point of day is found by detecting when our exposure in the
    market is at its maximum point. Note that this is an estimate.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.

    Returns
    -------
    pd.DataFrame
        Daily net position values, resampled for intraday behavior.
    """

    # Construct DataFrame of transaction amounts
    txn_val = transactions.copy()
    txn_val.index.names = ['date']
    txn_val['value'] = txn_val.amount * txn_val.price
    txn_val = txn_val.reset_index().pivot_table(
        index='date', values='value',
        columns='symbol').replace(np.nan, 0)

    # Cumulate transaction amounts each day
    txn_val['date'] = txn_val.index.date
    txn_val = txn_val.groupby('date').cumsum()

    # Calculate exposure, then take peak of exposure every day
    # (pd.Grouper replaces the deprecated pd.TimeGrouper)
    txn_val['exposure'] = txn_val.abs().sum(axis=1)
    condition = (txn_val['exposure'] == txn_val.groupby(
        pd.Grouper(freq='24H'))['exposure'].transform(max))
    txn_val = txn_val[condition].drop('exposure', axis=1)

    # Compute cash delta
    txn_val['cash'] = -txn_val.sum(axis=1)

    # Shift EOD positions to positions at start of next trading day
    positions_shifted = positions.copy().shift(1).fillna(0)
    starting_capital = positions.iloc[0].sum() / (1 + returns.iloc[0])
    positions_shifted.loc[positions_shifted.index[0],
                          'cash'] = starting_capital

    # Format and add start positions to intraday position changes
    txn_val.index = txn_val.index.normalize()
    corrected_positions = positions_shifted.add(txn_val, fill_value=0)
    corrected_positions.index.name = 'period_close'
    corrected_positions.columns.name = 'sid'

    return corrected_positions
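# Sketch of the peak-exposure idea on hypothetical trades: within a
# single day the cumulative traded value peaks at 11:00, so that point
# would be taken as the day's representative snapshot.
import pandas as pd

txns = pd.DataFrame(
    {'symbol': ['AAPL'] * 3,
     'amount': [100, 100, -200],
     'price': [10.0, 10.0, 10.0]},
    index=pd.to_datetime(['2016-01-04 10:00',
                          '2016-01-04 11:00',
                          '2016-01-04 15:00']))
exposure = (txns.amount * txns.price).cumsum().abs()  # 1000, 2000, 0
peak_time = exposure.idxmax()                         # 11:00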
def clip_returns_to_benchmark(rets, benchmark_rets): """ Drop entries from rets so that the start and end dates of rets match those of benchmark_rets. Parameters ---------- rets : pd.Series Daily returns of the strategy, noncumulative. - See pf.tears.create_full_tear_sheet for more details benchmark_rets : pd.Series Daily returns of the benchmark, noncumulative. Returns ------- clipped_rets : pd.Series Daily noncumulative returns with index clipped to match that of benchmark returns. """ if (rets.index[0] < benchmark_rets.index[0]) \ or (rets.index[-1] > benchmark_rets.index[-1]): clipped_rets = rets[benchmark_rets.index] else: clipped_rets = rets return clipped_rets
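# Minimal sketch (hypothetical values): the strategy starts a day
# before the benchmark, so the leading entry is dropped.
import pandas as pd

rets = pd.Series([0.01, 0.02, -0.01],
                 index=pd.to_datetime(['2016-01-04', '2016-01-05',
                                       '2016-01-06']))
bmark = pd.Series([0.00, 0.01],
                  index=pd.to_datetime(['2016-01-05', '2016-01-06']))
clip_returns_to_benchmark(rets, bmark)  # keeps 2016-01-05 onwards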
def to_utc(df):
    """
    For use in tests; applies a UTC timezone to a DataFrame's index,
    localizing naive timestamps and converting aware ones.
    """

    try:
        df.index = df.index.tz_localize('UTC')
    except TypeError:
        df.index = df.index.tz_convert('UTC')

    return df
def get_symbol_rets(symbol, start=None, end=None): """ Calls the currently registered 'returns_func' Parameters ---------- symbol : object An identifier for the asset whose return series is desired. e.g. ticker symbol or database ID start : date, optional Earliest date to fetch data for. Defaults to earliest date available. end : date, optional Latest date to fetch data for. Defaults to latest date available. Returns ------- pandas.Series Returned by the current 'returns_func' """ return SETTINGS['returns_func'](symbol, start=start, end=end)
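# Sketch of registering a custom data source via the module-level
# SETTINGS registry ('my_returns' and 'returns.csv' are hypothetical):
import pandas as pd

def my_returns(symbol, start=None, end=None):
    # e.g. a local CSV of daily returns, one column per symbol
    df = pd.read_csv('returns.csv', index_col=0, parse_dates=True)
    return df[symbol].loc[start:end]

SETTINGS['returns_func'] = my_returns
spy_rets = get_symbol_rets('SPY', start='2015-01-01', end='2016-01-01')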
def configure_legend(ax, autofmt_xdate=True, change_colors=False,
                     rotation=30, ha='right'):
    """
    Format legend for perf attribution plots:
    - put legend to the right of plot instead of overlapping with it
    - make legend order match up with graph lines
    - set colors according to colormap
    """
    chart_box = ax.get_position()
    ax.set_position([chart_box.x0, chart_box.y0,
                     chart_box.width * 0.75, chart_box.height])

    # make legend order match graph lines
    handles, labels = ax.get_legend_handles_labels()
    handles_and_labels_sorted = sorted(zip(handles, labels),
                                       key=lambda x: x[0].get_ydata()[-1],
                                       reverse=True)
    handles_sorted = [h[0] for h in handles_and_labels_sorted]
    labels_sorted = [h[1] for h in handles_and_labels_sorted]

    if change_colors:
        for handle, color in zip(handles_sorted, cycle(COLORS)):
            handle.set_color(color)

    ax.legend(handles=handles_sorted,
              labels=labels_sorted,
              frameon=True,
              framealpha=0.5,
              loc='upper left',
              bbox_to_anchor=(1.05, 1),
              fontsize='large')

    # manually rotate xticklabels instead of using matplotlib's
    # autofmt_xdate, because it disables xticklabels for all but the
    # last plot
    if autofmt_xdate:
        for label in ax.get_xticklabels():
            label.set_ha(ha)
            label.set_rotation(rotation)
def sample_colormap(cmap_name, n_samples):
    """
    Sample n_samples evenly spaced colors from a matplotlib colormap.
    """
    # cm.get_cmap replaces the private cm.cmap_d registry lookup
    colormap = cm.get_cmap(cmap_name)
    return [colormap(i) for i in np.linspace(0, 1, n_samples)]
def customize(func): """ Decorator to set plotting context and axes style during function call. """ @wraps(func) def call_w_context(*args, **kwargs): set_context = kwargs.pop('set_context', True) if set_context: with plotting_context(), axes_style(): return func(*args, **kwargs) else: return func(*args, **kwargs) return call_w_context
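# Sketch: any plotting helper wrapped with @customize gains a
# 'set_context' keyword that toggles the seaborn styling context
# ('rets' in the commented calls is a hypothetical returns Series).
import matplotlib.pyplot as plt

@customize
def plot_series(series, ax=None):
    ax = ax if ax is not None else plt.gca()
    series.plot(ax=ax)
    return ax

# plot_series(rets)                     # styled via plotting_context()
# plot_series(rets, set_context=False)  # caller manages styling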
def plotting_context(context='notebook', font_scale=1.5, rc=None):
    """
    Create pyfolio default plotting style context.

    Under the hood, calls and returns seaborn.plotting_context() with
    some custom settings. Usually you would use in a with-context.

    Parameters
    ----------
    context : str, optional
        Name of seaborn context.
    font_scale : float, optional
        Scale font by factor font_scale.
    rc : dict, optional
        Config flags.
        By default, {'lines.linewidth': 1.5}
        is being used and will be added to any
        rc passed in, unless explicitly overridden.

    Returns
    -------
    seaborn plotting context

    Example
    -------
    >>> with pyfolio.plotting.plotting_context(font_scale=2):
    ...     pyfolio.create_full_tear_sheet(..., set_context=False)

    See also
    --------
    For more information, see seaborn.plotting_context().
    """
    if rc is None:
        rc = {}

    rc_default = {'lines.linewidth': 1.5}

    # Add defaults if they do not exist
    for name, val in rc_default.items():
        rc.setdefault(name, val)

    return sns.plotting_context(context=context, font_scale=font_scale,
                                rc=rc)
def axes_style(style='darkgrid', rc=None):
    """
    Create pyfolio default axes style context.

    Under the hood, calls and returns seaborn.axes_style() with
    some custom settings. Usually you would use in a with-context.

    Parameters
    ----------
    style : str, optional
        Name of seaborn style.
    rc : dict, optional
        Config flags.

    Returns
    -------
    seaborn plotting context

    Example
    -------
    >>> with pyfolio.plotting.axes_style(style='whitegrid'):
    ...     pyfolio.create_full_tear_sheet(..., set_context=False)

    See also
    --------
    For more information, see seaborn.axes_style().
    """
    if rc is None:
        rc = {}

    rc_default = {}

    # Add defaults if they do not exist
    for name, val in rc_default.items():
        rc.setdefault(name, val)

    return sns.axes_style(style=style, rc=rc)
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs): """ Plots a heatmap of returns by month. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to seaborn plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on. """ if ax is None: ax = plt.gca() monthly_ret_table = ep.aggregate_returns(returns, 'monthly') monthly_ret_table = monthly_ret_table.unstack().round(3) sns.heatmap( monthly_ret_table.fillna(0) * 100.0, annot=True, annot_kws={"size": 9}, alpha=1.0, center=0.0, cbar=False, cmap=matplotlib.cm.RdYlGn, ax=ax, **kwargs) ax.set_ylabel('Year') ax.set_xlabel('Month') ax.set_title("Monthly returns (%)") return ax
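# Common calling pattern for the plotting helpers below (synthetic
# returns, purely illustrative): create an Axes, pass it in, and let
# the function draw on it.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rets = pd.Series(np.random.randn(500) / 100,
                 index=pd.date_range('2015-01-02', periods=500,
                                     freq='B'))
fig, ax = plt.subplots(figsize=(8, 6))
plot_monthly_returns_heatmap(rets, ax=ax)
plt.show()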
def plot_annual_returns(returns, ax=None, **kwargs):
    """
    Plots a bar graph of returns by year.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    # FuncFormatter already wraps utils.percentage; no second wrap needed
    x_axis_formatter = FuncFormatter(utils.percentage)
    ax.xaxis.set_major_formatter(x_axis_formatter)
    ax.tick_params(axis='x', which='major')

    ann_ret_df = pd.DataFrame(
        ep.aggregate_returns(
            returns,
            'yearly'))

    ax.axvline(
        100 *
        ann_ret_df.values.mean(),
        color='steelblue',
        linestyle='--',
        lw=4,
        alpha=0.7)
    (100 * ann_ret_df.sort_index(ascending=False)
     ).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)
    ax.axvline(0.0, color='black', linestyle='-', lw=3)

    ax.set_ylabel('Year')
    ax.set_xlabel('Returns')
    ax.set_title("Annual returns")
    ax.legend(['Mean'], frameon=True, framealpha=0.5)

    return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
    """
    Plots a distribution of monthly returns.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    x_axis_formatter = FuncFormatter(utils.percentage)
    ax.xaxis.set_major_formatter(x_axis_formatter)
    ax.tick_params(axis='x', which='major')

    monthly_ret_table = ep.aggregate_returns(returns, 'monthly')

    ax.hist(
        100 * monthly_ret_table,
        color='orangered',
        alpha=0.80,
        bins=20,
        **kwargs)

    ax.axvline(
        100 * monthly_ret_table.mean(),
        color='gold',
        linestyle='--',
        lw=4,
        alpha=1.0)

    ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
    ax.legend(['Mean'], frameon=True, framealpha=0.5)
    ax.set_ylabel('Number of months')
    ax.set_xlabel('Returns')
    ax.set_title("Distribution of monthly returns")
    return ax
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
    """
    Plots the total number of stocks with an active position, either
    short or long. Displays the daily total, the daily average per
    month, and the all-time daily average.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    positions = positions.copy().drop('cash', axis='columns')
    df_holdings = positions.replace(0, np.nan).count(axis=1)
    df_holdings_by_month = df_holdings.resample('1M').mean()
    df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
    df_holdings_by_month.plot(
        color='orangered',
        lw=2,
        ax=ax,
        **kwargs)
    ax.axhline(
        df_holdings.values.mean(),
        color='steelblue',
        ls='--',
        lw=3)

    ax.set_xlim((returns.index[0], returns.index[-1]))

    leg = ax.legend(['Daily holdings',
                     'Average daily holdings, by month',
                     'Average daily holdings, overall'],
                    loc=legend_loc, frameon=True,
                    framealpha=0.5)
    leg.get_frame().set_edgecolor('black')

    ax.set_title('Total holdings')
    ax.set_ylabel('Holdings')
    ax.set_xlabel('')
    return ax
def plot_long_short_holdings(returns, positions,
                             legend_loc='upper left', ax=None, **kwargs):
    """
    Plots the total number of stocks with an active position, breaking
    out short and long into transparent filled regions.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    positions = positions.drop('cash', axis='columns')
    positions = positions.replace(0, np.nan)
    df_longs = positions[positions > 0].count(axis=1)
    df_shorts = positions[positions < 0].count(axis=1)
    lf = ax.fill_between(df_longs.index, 0, df_longs.values,
                         color='g', alpha=0.5, lw=2.0)
    sf = ax.fill_between(df_shorts.index, 0, df_shorts.values,
                         color='r', alpha=0.5, lw=2.0)

    bf = patches.Rectangle([0, 0], 1, 1, color='darkgoldenrod')
    leg = ax.legend([lf, sf, bf],
                    ['Long (max: %s, min: %s)' % (df_longs.max(),
                                                  df_longs.min()),
                     'Short (max: %s, min: %s)' % (df_shorts.max(),
                                                   df_shorts.min()),
                     'Overlap'], loc=legend_loc, frameon=True,
                    framealpha=0.5)
    leg.get_frame().set_edgecolor('black')

    ax.set_xlim((returns.index[0], returns.index[-1]))
    ax.set_title('Long and short holdings')
    ax.set_ylabel('Holdings')
    ax.set_xlabel('')
    return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
    """
    Plots cumulative returns highlighting top drawdown periods.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        Number of top drawdown periods to plot (default 10).
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    y_axis_formatter = FuncFormatter(utils.two_dec_places)
    ax.yaxis.set_major_formatter(y_axis_formatter)

    df_cum_rets = ep.cum_returns(returns, starting_value=1.0)
    df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)

    df_cum_rets.plot(ax=ax, **kwargs)

    lim = ax.get_ylim()
    colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
    for i, (peak, recovery) in df_drawdowns[
            ['Peak date', 'Recovery date']].iterrows():
        if pd.isnull(recovery):
            recovery = returns.index[-1]
        ax.fill_between((peak, recovery),
                        lim[0],
                        lim[1],
                        alpha=.4,
                        color=colors[i])
    ax.set_ylim(lim)
    ax.set_title('Top %i drawdown periods' % top)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left',
              frameon=True, framealpha=0.5)
    ax.set_xlabel('')
    return ax
def plot_drawdown_underwater(returns, ax=None, **kwargs):
    """
    Plots how far underwater returns are over time, i.e. the current
    drawdown vs. date.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    y_axis_formatter = FuncFormatter(utils.percentage)
    ax.yaxis.set_major_formatter(y_axis_formatter)

    df_cum_rets = ep.cum_returns(returns, starting_value=1.0)
    running_max = np.maximum.accumulate(df_cum_rets)
    underwater = -100 * ((running_max - df_cum_rets) / running_max)
    underwater.plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)
    ax.set_ylabel('Drawdown')
    ax.set_title('Underwater plot')
    ax.set_xlabel('')
    return ax
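# Numeric sketch of the underwater series: a cumulative value path of
# 1.0 -> 1.2 -> 0.9 sits 0%, 0%, then -25% below its running peak.
import numpy as np
import pandas as pd

cum = pd.Series([1.0, 1.2, 0.9])
running_max = np.maximum.accumulate(cum)  # 1.0, 1.2, 1.2
underwater = -100 * ((running_max - cum) / running_max)
# 0.0, 0.0, -25.0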
def plot_perf_stats(returns, factor_returns, ax=None): """ Create box plot of some performance metrics of the strategy. The width of the box whiskers is determined by a bootstrap. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. factor_returns : pd.Series Daily noncumulative returns of the benchmark factor to which betas are computed. Usually a benchmark such as market returns. - This is in the same style as returns. ax : matplotlib.Axes, optional Axes upon which to plot. Returns ------- ax : matplotlib.Axes The axes that were plotted on. """ if ax is None: ax = plt.gca() bootstrap_values = timeseries.perf_stats_bootstrap(returns, factor_returns, return_stats=False) bootstrap_values = bootstrap_values.drop('Kurtosis', axis='columns') sns.boxplot(data=bootstrap_values, orient='h', ax=ax) return ax
def show_perf_stats(returns, factor_returns=None, positions=None,
                    transactions=None, turnover_denom='AGB',
                    live_start_date=None, bootstrap=False,
                    header_rows=None):
    """
    Prints some performance metrics of the strategy.

    - Shows amount of time the strategy has been run in backtest and
      out-of-sample (in live trading).

    - Shows Omega ratio, max drawdown, Calmar ratio, annual return,
      stability, Sharpe ratio, annual volatility, alpha, and beta.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas
        are computed. Usually a benchmark such as market returns.
         - This is in the same style as returns.
    positions : pd.DataFrame, optional
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame, optional
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in tears.create_full_tear_sheet.
    turnover_denom : str, optional
        Either AGB or portfolio_value, default AGB.
         - See full explanation in txn.get_turnover.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading, after
        its backtest period.
    bootstrap : boolean, optional
        Whether to perform bootstrap analysis for the performance
        metrics.
         - For more information, see timeseries.perf_stats_bootstrap
    header_rows : dict or OrderedDict, optional
        Extra rows to display at the top of the displayed table.
    """

    if bootstrap:
        perf_func = timeseries.perf_stats_bootstrap
    else:
        perf_func = timeseries.perf_stats

    perf_stats_all = perf_func(
        returns,
        factor_returns=factor_returns,
        positions=positions,
        transactions=transactions,
        turnover_denom=turnover_denom)

    date_rows = OrderedDict()
    if len(returns.index) > 0:
        date_rows['Start date'] = returns.index[0].strftime('%Y-%m-%d')
        date_rows['End date'] = returns.index[-1].strftime('%Y-%m-%d')

    if live_start_date is not None:
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)
        returns_is = returns[returns.index < live_start_date]
        returns_oos = returns[returns.index >= live_start_date]

        positions_is = None
        positions_oos = None
        transactions_is = None
        transactions_oos = None

        if positions is not None:
            positions_is = positions[positions.index < live_start_date]
            positions_oos = positions[positions.index >= live_start_date]
        if transactions is not None:
            # use >= so the live_start_date row is included out-of-sample,
            # matching the returns and positions splits above
            transactions_is = transactions[(transactions.index <
                                            live_start_date)]
            transactions_oos = transactions[(transactions.index >=
                                             live_start_date)]

        perf_stats_is = perf_func(
            returns_is,
            factor_returns=factor_returns,
            positions=positions_is,
            transactions=transactions_is,
            turnover_denom=turnover_denom)

        perf_stats_oos = perf_func(
            returns_oos,
            factor_returns=factor_returns,
            positions=positions_oos,
            transactions=transactions_oos,
            turnover_denom=turnover_denom)

        if len(returns.index) > 0:
            date_rows['In-sample months'] = int(len(returns_is) /
                                                APPROX_BDAYS_PER_MONTH)
            date_rows['Out-of-sample months'] = int(len(returns_oos) /
                                                    APPROX_BDAYS_PER_MONTH)

        perf_stats = pd.concat(OrderedDict([
            ('In-sample', perf_stats_is),
            ('Out-of-sample', perf_stats_oos),
            ('All', perf_stats_all),
        ]), axis=1)
    else:
        if len(returns.index) > 0:
            date_rows['Total months'] = int(len(returns) /
                                            APPROX_BDAYS_PER_MONTH)
        perf_stats = pd.DataFrame(perf_stats_all, columns=['Backtest'])

    # .items() replaces the deprecated Series.iteritems()
    for column in perf_stats.columns:
        for stat, value in perf_stats[column].items():
            if stat in STAT_FUNCS_PCT:
                perf_stats.loc[stat, column] = str(np.round(value * 100,
                                                            1)) + '%'

    if header_rows is None:
        header_rows = date_rows
    else:
        header_rows = OrderedDict(header_rows)
        header_rows.update(date_rows)

    utils.print_table(
        perf_stats,
        float_format='{0:.2f}'.format,
        header_rows=header_rows,
    )
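# Usage sketch for show_perf_stats (synthetic daily returns; the
# live_start_date split labels the later period as out-of-sample):
import numpy as np
import pandas as pd

rets = pd.Series(np.random.randn(750) / 100,
                 index=pd.date_range('2014-01-02', periods=750,
                                     freq='B', tz='UTC'))
show_perf_stats(rets,
                live_start_date='2016-01-01',
                header_rows={'Strategy': 'demo'})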
def plot_returns(returns,
                 live_start_date=None,
                 ax=None):
    """
    Plots raw returns over time.

    Backtest returns are in green, and out-of-sample (live trading)
    returns are in red.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    live_start_date : datetime, optional
        The date when the strategy began live trading, after
        its backtest period. This date should be normalized.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    ax.set_xlabel('')
    ax.set_ylabel('Returns')

    if live_start_date is not None:
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)
        is_returns = returns.loc[returns.index < live_start_date]
        oos_returns = returns.loc[returns.index >= live_start_date]
        is_returns.plot(ax=ax, color='g')
        oos_returns.plot(ax=ax, color='r')

    else:
        returns.plot(ax=ax, color='g')

    return ax
def plot_rolling_returns(returns,
                         factor_returns=None,
                         live_start_date=None,
                         logy=False,
                         cone_std=None,
                         legend_loc='best',
                         volatility_match=False,
                         cone_function=timeseries.forecast_cone_bootstrap,
                         ax=None, **kwargs):
    """
    Plots cumulative rolling returns versus some benchmark.

    Backtest returns are in green, and out-of-sample (live trading)
    returns are in red.

    Additionally, a non-parametric cone plot may be added to the
    out-of-sample returns region.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas
        are computed. Usually a benchmark such as market returns.
         - This is in the same style as returns.
    live_start_date : datetime, optional
        The date when the strategy began live trading, after
        its backtest period. This date should be normalized.
    logy : bool, optional
        Whether to log-scale the y-axis.
    cone_std : float, or tuple, optional
        If float, the standard deviation to use for the cone plots.
        If tuple, the tuple of standard deviation values to use for
        the cone plots.
         - See timeseries.forecast_cone_bounds for more details.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    volatility_match : bool, optional
        Whether to normalize the volatility of the returns to those of the
        benchmark returns. This helps compare strategies with different
        volatilities. Requires passing of benchmark_rets.
    cone_function : function, optional
        Function to use when generating forecast probability cone.
        The function signature must follow the form:
        def cone(in_sample_returns (pd.Series),
                 days_to_project_forward (int),
                 cone_std= (float, or tuple),
                 starting_value= (int, or float))
        See timeseries.forecast_cone_bootstrap for an example.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
""" if ax is None: ax = plt.gca() ax.set_xlabel('') ax.set_ylabel('Cumulative returns') ax.set_yscale('log' if logy else 'linear') if volatility_match and factor_returns is None: raise ValueError('volatility_match requires passing of ' 'factor_returns.') elif volatility_match and factor_returns is not None: bmark_vol = factor_returns.loc[returns.index].std() returns = (returns / returns.std()) * bmark_vol cum_rets = ep.cum_returns(returns, 1.0) y_axis_formatter = FuncFormatter(utils.two_dec_places) ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter)) if factor_returns is not None: cum_factor_returns = ep.cum_returns( factor_returns[cum_rets.index], 1.0) cum_factor_returns.plot(lw=2, color='gray', label=factor_returns.name, alpha=0.60, ax=ax, **kwargs) if live_start_date is not None: live_start_date = ep.utils.get_utc_timestamp(live_start_date) is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date] oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date] else: is_cum_returns = cum_rets oos_cum_returns = pd.Series([]) is_cum_returns.plot(lw=3, color='forestgreen', alpha=0.6, label='Backtest', ax=ax, **kwargs) if len(oos_cum_returns) > 0: oos_cum_returns.plot(lw=4, color='red', alpha=0.6, label='Live', ax=ax, **kwargs) if cone_std is not None: if isinstance(cone_std, (float, int)): cone_std = [cone_std] is_returns = returns.loc[returns.index < live_start_date] cone_bounds = cone_function( is_returns, len(oos_cum_returns), cone_std=cone_std, starting_value=is_cum_returns[-1]) cone_bounds = cone_bounds.set_index(oos_cum_returns.index) for std in cone_std: ax.fill_between(cone_bounds.index, cone_bounds[float(std)], cone_bounds[float(-std)], color='steelblue', alpha=0.5) if legend_loc is not None: ax.legend(loc=legend_loc, frameon=True, framealpha=0.5) ax.axhline(1.0, linestyle='--', color='black', lw=2) return ax
def plot_rolling_beta(returns, factor_returns, legend_loc='best',
                      ax=None, **kwargs):
    """
    Plots the rolling 6-month and 12-month beta versus date.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series
        Daily noncumulative returns of the benchmark factor to which betas
        are computed. Usually a benchmark such as market returns.
         - This is in the same style as returns.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    y_axis_formatter = FuncFormatter(utils.two_dec_places)
    ax.yaxis.set_major_formatter(y_axis_formatter)

    ax.set_title("Rolling portfolio beta to " + str(factor_returns.name))
    ax.set_ylabel('Beta')
    rb_1 = timeseries.rolling_beta(
        returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6)
    rb_1.plot(color='steelblue', lw=3, alpha=0.6, ax=ax, **kwargs)
    rb_2 = timeseries.rolling_beta(
        returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 12)
    rb_2.plot(color='grey', lw=3, alpha=0.4, ax=ax, **kwargs)
    ax.axhline(rb_1.mean(), color='steelblue', linestyle='--', lw=3)
    ax.axhline(0.0, color='black', linestyle='-', lw=2)

    ax.set_xlabel('')
    ax.legend(['6-mo', '12-mo'],
              loc=legend_loc, frameon=True, framealpha=0.5)
    ax.set_ylim((-1.0, 1.0))
    return ax
def plot_rolling_volatility(returns, factor_returns=None,
                            rolling_window=APPROX_BDAYS_PER_MONTH * 6,
                            legend_loc='best', ax=None, **kwargs):
    """
    Plots the rolling volatility versus date.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor to which betas
        are computed. Usually a benchmark such as market returns.
         - This is in the same style as returns.
    rolling_window : int, optional
        The days window over which to compute the volatility.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    y_axis_formatter = FuncFormatter(utils.two_dec_places)
    ax.yaxis.set_major_formatter(y_axis_formatter)

    rolling_vol_ts = timeseries.rolling_volatility(
        returns, rolling_window)
    rolling_vol_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
                        **kwargs)
    if factor_returns is not None:
        rolling_vol_ts_factor = timeseries.rolling_volatility(
            factor_returns, rolling_window)
        rolling_vol_ts_factor.plot(alpha=.7, lw=3, color='grey', ax=ax,
                                   **kwargs)

    ax.set_title('Rolling volatility (6-month)')
    ax.axhline(
        rolling_vol_ts.mean(),
        color='steelblue',
        linestyle='--',
        lw=3)

    ax.axhline(0.0, color='black', linestyle='-', lw=2)

    ax.set_ylabel('Volatility')
    ax.set_xlabel('')
    if factor_returns is None:
        ax.legend(['Volatility', 'Average volatility'],
                  loc=legend_loc, frameon=True, framealpha=0.5)
    else:
        ax.legend(['Volatility', 'Benchmark volatility',
                   'Average volatility'],
                  loc=legend_loc, frameon=True, framealpha=0.5)
    return ax
def plot_rolling_sharpe(returns, factor_returns=None,
                        rolling_window=APPROX_BDAYS_PER_MONTH * 6,
                        legend_loc='best', ax=None, **kwargs):
    """
    Plots the rolling Sharpe ratio versus date.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    factor_returns : pd.Series, optional
        Daily noncumulative returns of the benchmark factor for
        which the benchmark rolling Sharpe is computed. Usually
        a benchmark such as market returns.
         - This is in the same style as returns.
    rolling_window : int, optional
        The days window over which to compute the Sharpe ratio.
    legend_loc : matplotlib.loc, optional
        The location of the legend on the plot.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    y_axis_formatter = FuncFormatter(utils.two_dec_places)
    ax.yaxis.set_major_formatter(y_axis_formatter)

    rolling_sharpe_ts = timeseries.rolling_sharpe(
        returns, rolling_window)
    rolling_sharpe_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
                           **kwargs)

    if factor_returns is not None:
        rolling_sharpe_ts_factor = timeseries.rolling_sharpe(
            factor_returns, rolling_window)
        rolling_sharpe_ts_factor.plot(alpha=.7, lw=3, color='grey', ax=ax,
                                      **kwargs)

    ax.set_title('Rolling Sharpe ratio (6-month)')
    ax.axhline(
        rolling_sharpe_ts.mean(),
        color='steelblue',
        linestyle='--',
        lw=3)
    ax.axhline(0.0, color='black', linestyle='-', lw=3)

    ax.set_ylabel('Sharpe ratio')
    ax.set_xlabel('')
    if factor_returns is None:
        ax.legend(['Sharpe', 'Average'],
                  loc=legend_loc, frameon=True, framealpha=0.5)
    else:
        ax.legend(['Sharpe', 'Benchmark Sharpe', 'Average'],
                  loc=legend_loc, frameon=True, framealpha=0.5)

    return ax
def plot_gross_leverage(returns, positions, ax=None, **kwargs): """ Plots gross leverage versus date. Gross leverage is the sum of long and short exposure per share divided by net asset value. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. positions : pd.DataFrame Daily net position values. - See full explanation in create_full_tear_sheet. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on. """ if ax is None: ax = plt.gca() gl = timeseries.gross_lev(positions) gl.plot(lw=0.5, color='limegreen', legend=False, ax=ax, **kwargs) ax.axhline(gl.mean(), color='g', linestyle='--', lw=3) ax.set_title('Gross leverage') ax.set_ylabel('Gross leverage') ax.set_xlabel('') return ax
def plot_exposures(returns, positions, ax=None, **kwargs):
    """
    Plots the long and short exposure as filled areas, plus the net
    exposure, as fractions of total portfolio value.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in tears.create_full_tear_sheet.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.
    **kwargs, optional
        Passed to plotting function.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """

    if ax is None:
        ax = plt.gca()

    pos_no_cash = positions.drop('cash', axis=1)
    l_exp = pos_no_cash[pos_no_cash > 0].sum(axis=1) / positions.sum(axis=1)
    s_exp = pos_no_cash[pos_no_cash < 0].sum(axis=1) / positions.sum(axis=1)
    net_exp = pos_no_cash.sum(axis=1) / positions.sum(axis=1)

    ax.fill_between(l_exp.index,
                    0,
                    l_exp.values,
                    label='Long', color='green', alpha=0.5)
    ax.fill_between(s_exp.index,
                    0,
                    s_exp.values,
                    label='Short', color='red', alpha=0.5)
    ax.plot(net_exp.index, net_exp.values,
            label='Net', color='black', linestyle='dotted')

    ax.set_xlim((returns.index[0], returns.index[-1]))
    ax.set_title("Exposure")
    ax.set_ylabel('Exposure')
    ax.legend(loc='lower left', frameon=True, framealpha=0.5)
    ax.set_xlabel('')
    return ax