def Select(self, command, index):
"""
The Master sent a Select command to the Outstation. Handle it.
:param command: ControlRelayOutputBlock,
AnalogOutputInt16, AnalogOutputInt32, AnalogOutputFloat32, or AnalogOutputDouble64.
:param index: int
:return: CommandStatus
"""
OutstationApplication.process_point_value('Select', command, index, None)
return opendnp3.CommandStatus.SUCCESS
|
def Operate(self, command, index, op_type):
"""
The Master sent an Operate command to the Outstation. Handle it.
:param command: ControlRelayOutputBlock,
AnalogOutputInt16, AnalogOutputInt32, AnalogOutputFloat32, or AnalogOutputDouble64.
:param index: int
:param op_type: OperateType
:return: CommandStatus
"""
OutstationApplication.process_point_value('Operate', command, index, op_type)
return opendnp3.CommandStatus.SUCCESS
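
# A minimal sketch (not part of the library or the original example) of the
# class-level handler that the Select/Operate callbacks above delegate to.
# The body is an assumption, shown only to illustrate the expected signature
# (command_type, command, index, op_type) implied by the call sites above.
@classmethod
def process_point_value(cls, command_type, command, index, op_type):
    """Record a command received from the Master for the given point index."""
    print('Outstation received {} for index {}: {}'.format(command_type, index, command))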
|
def do_a(self, line):
"""Send the Master an AnalogInput (group 32) value. Command syntax is: a index value"""
index, value_string = self.index_and_value_from_line(line)
if index is not None and value_string:
try:
self.application.apply_update(opendnp3.Analog(float(value_string)), index)
except ValueError:
print('Please enter a floating-point value as the second argument.')
|
def do_b(self, line):
"""Send the Master a BinaryInput (group 2) value. Command syntax is: 'b index true' or 'b index false'"""
index, value_string = self.index_and_value_from_line(line)
if index is not None and value_string:
if value_string.lower() == 'true' or value_string.lower() == 'false':
self.application.apply_update(opendnp3.Binary(value_string == 'true'), index)
else:
print('Please enter true or false as the second argument.')
|
def do_b0(self, line):
"""Send the Master a BinaryInput (group 2) value of False at index 6. Command syntax is: b0"""
self.application.apply_update(opendnp3.Binary(False), index=6)
|
def do_c(self, line):
"""Send the Master a Counter (group 22) value. Command syntax is: c index value"""
index, value_string = self.index_and_value_from_line(line)
if index is not None and value_string:
try:
self.application.apply_update(opendnp3.Counter(int(value_string)), index)
except ValueError:
print('Please enter an integer value as the second argument.')
|
def do_d(self, line):
"""Send the Master a DoubleBitBinaryInput (group 4) value of DETERMINED_ON. Command syntax is: d index"""
index = self.index_from_line(line)
if index is not None:
self.application.apply_update(opendnp3.DoubleBitBinary(opendnp3.DoubleBit.DETERMINED_ON), index)
|
def do_menu(self, line):
"""Display a menu of command-line options. Command syntax is: menu"""
print('\ta\t\tAnalog measurement.\tEnter index and value as arguments.')
print('\ta2\t\tAnalog 2 for MMDC.Vol (index 4).')
print('\tb\t\tBinary measurement.\tEnter index and value as arguments.')
print('\tb0\t\tBinary False for MMDC1.Amp.range (index 6).')
print('\tc\t\tCounter measurement.\tEnter index and value as arguments.')
print('\td\t\tDoubleBit DETERMINED_ON.\tEnter index as an argument.')
print('\thelp\t\tDisplay command-line help.')
print('\tmenu\t\tDisplay this menu.')
print('\tquit')
|
def index_and_value_from_line(line):
"""Parse an index (integer) and value (string) from command line args and return them."""
try:
index = int(line.split(' ')[0])
except (ValueError, IndexError):
print('Please enter an integer index as the first argument.')
index = None
try:
value_string = line.split(' ')[1]
except (ValueError, IndexError):
print('Please enter a second argument.')
value_string = None
return index, value_string
|
def index_from_line(line):
"""Parse an index (integer) from command line args and return it."""
try:
index = int(line.split(' ')[0])
except (ValueError, IndexError):
print('Please enter an integer index as the first argument.')
index = None
return index
|
def do_menu(self, line):
"""Display a menu of command-line options. Command syntax is: menu"""
print('\tchan_log_all\tSet the channel log level to ALL_COMMS.')
print('\tchan_log_normal\tSet the channel log level to NORMAL.')
print('\tdisable_unsol\tPerform the function DISABLE_UNSOLICITED.')
print('\thelp\t\tDisplay command-line help.')
print('\tmast_log_all\tSet the master log level to ALL_COMMS.')
print('\tmast_log_normal\tSet the master log level to NORMAL.')
print('\tmenu\t\tDisplay this menu.')
print('\to1\t\tSend a DirectOperate LATCH_ON command.')
print('\to2\t\tSend a DirectOperate analog value.')
print('\to3\t\tSend a DirectOperate CommandSet.')
print('\tquit')
print('\trestart\t\tRequest an outstation cold restart.')
print('\ts1\t\tSend a SelectAndOperate LATCH_ON command.')
print('\ts2\t\tSend a SelectAndOperate CommandSet.')
print('\tscan_all\tRead data from the outstation (ScanAllObjects).')
print('\tscan_fast\tDemand immediate execution of the fast (every 1 min) Class 1 scan.')
print('\tscan_range\tPerform an ad-hoc scan (ScanRange) of GroupVariation 1.2, range 0..3.')
print('\tscan_slow\tDemand immediate execution of the slow (every 30 mins) All-Classes scan.')
print('\twrite_time\tWrite a TimeAndInterval to the outstation.')
|
def do_chan_log_all(self, line):
"""Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all"""
self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS))
print('Channel log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS))
|
def do_chan_log_normal(self, line):
"""Set the channel log level to NORMAL. Command syntax is: chan_log_normal"""
self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL))
print('Channel log filtering level is now: {0}'.format(opendnp3.levels.NORMAL))
|
def do_disable_unsol(self, line):
"""Perform the function DISABLE_UNSOLICITED. Command syntax is: disable_unsol"""
headers = [opendnp3.Header().AllObjects(60, 2),
opendnp3.Header().AllObjects(60, 3),
opendnp3.Header().AllObjects(60, 4)]
self.application.master.PerformFunction("disable unsolicited",
opendnp3.FunctionCode.DISABLE_UNSOLICITED,
headers,
opendnp3.TaskConfig().Default())
|
def do_mast_log_all(self, line):
"""Set the master log level to ALL_COMMS. Command syntax is: mast_log_all"""
self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS))
_log.debug('Master log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS))
|
def do_mast_log_normal(self, line):
"""Set the master log level to NORMAL. Command syntax is: mast_log_normal"""
self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL))
_log.debug('Master log filtering level is now: {0}'.format(opendnp3.levels.NORMAL))
|
def do_o1(self, line):
"""Send a DirectOperate BinaryOutput (group 12) index 5 LATCH_ON to the Outstation. Command syntax is: o1"""
self.application.send_direct_operate_command(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON),
5,
command_callback)
|
def do_o3(self, line):
"""Send a DirectOperate BinaryOutput (group 12) CommandSet to the Outstation. Command syntax is: o3"""
self.application.send_direct_operate_command_set(opendnp3.CommandSet(
[
opendnp3.WithIndex(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON), 0),
opendnp3.WithIndex(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_OFF), 1)
]),
command_callback
)
|
def do_restart(self, line):
"""Request that the Outstation perform a cold restart. Command syntax is: restart"""
self.application.master.Restart(opendnp3.RestartType.COLD, restart_callback)
|
def do_s1(self, line):
"""Send a SelectAndOperate BinaryOutput (group 12) index 8 LATCH_ON to the Outstation. Command syntax is: s1"""
self.application.send_select_and_operate_command(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON),
8,
command_callback)
|
def do_s2(self, line):
"""Send a SelectAndOperate BinaryOutput (group 12) CommandSet to the Outstation. Command syntax is: s2"""
self.application.send_select_and_operate_command_set(opendnp3.CommandSet(
[
opendnp3.WithIndex(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON), 0)
]),
command_callback
)
|
def do_scan_all(self, line):
"""Call ScanAllObjects. Command syntax is: scan_all"""
self.application.master.ScanAllObjects(opendnp3.GroupVariationID(2, 1), opendnp3.TaskConfig().Default())
|
def do_scan_range(self, line):
"""Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range"""
self.application.master.ScanRange(opendnp3.GroupVariationID(1, 2), 0, 3, opendnp3.TaskConfig().Default())
|
def do_write_time(self, line):
"""Write a TimeAndInterval to the Outstation. Command syntax is: write_time"""
millis_since_epoch = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
self.application.master.Write(opendnp3.TimeAndInterval(opendnp3.DNPTime(millis_since_epoch),
100,
opendnp3.IntervalUnits.Seconds),
0, # index
opendnp3.TaskConfig().Default())
|
def with_bloomberg(func):
"""
Wrapper function for Bloomberg connection
Args:
func: function to wrap
"""
@wraps(func)
def wrapper(*args, **kwargs):
scope = utils.func_scope(func=func)
param = inspect.signature(func).parameters
port = kwargs.pop('port', _PORT_)
timeout = kwargs.pop('timeout', _TIMEOUT_)
restart = kwargs.pop('restart', False)
all_kw = {
k: args[n] if n < len(args) else v.default
for n, (k, v) in enumerate(param.items()) if k != 'kwargs'
}
all_kw.update(kwargs)
log_level = kwargs.get('log', logs.LOG_LEVEL)
for to_list in ['tickers', 'flds']:
conv = all_kw.get(to_list, None)
if hasattr(conv, 'tolist'):
all_kw[to_list] = getattr(conv, 'tolist')()
if isinstance(conv, str):
all_kw[to_list] = [conv]
cached_data = []
if scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']:
to_qry = cached.bdp_bds_cache(func=func.__name__, **all_kw)
cached_data += to_qry.cached_data
if not (to_qry.tickers and to_qry.flds):
if not cached_data: return pd.DataFrame()
res = pd.concat(cached_data, sort=False).reset_index(drop=True)
if not all_kw.get('raw', False):
res = assist.format_output(
data=res, source=func.__name__,
col_maps=all_kw.get('col_maps', dict())
)
return res
all_kw['tickers'] = to_qry.tickers
all_kw['flds'] = to_qry.flds
if scope in ['xbbg.blp.bdib']:
data_file = storage.hist_file(
ticker=all_kw['ticker'], dt=all_kw['dt'], typ=all_kw['typ'],
)
if files.exists(data_file):
logger = logs.get_logger(func, level=log_level)
if all_kw.get('batch', False): return
logger.debug(f'reading from {data_file} ...')
return assist.format_intraday(data=pd.read_parquet(data_file), **all_kw)
_, new = create_connection(port=port, timeout=timeout, restart=restart)
res = func(**{
k: v for k, v in all_kw.items() if k not in ['raw', 'col_maps']
})
if new: delete_connection()
if scope.startswith('xbbg.blp.') and isinstance(res, list):
final = cached_data + res
if not final: return pd.DataFrame()
res = pd.DataFrame(pd.concat(final, sort=False))
if (scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']) \
and (not all_kw.get('raw', False)):
res = assist.format_output(
data=res.reset_index(drop=True), source=func.__name__,
col_maps=all_kw.get('col_maps', dict()),
)
return res
return wrapper
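
# Sketch of how the decorator above would be applied; `sample_ref` is a
# hypothetical function, not part of the source. The wrapper pops `port`,
# `timeout` and `restart` from kwargs, resolves cached results for the
# xbbg.blp query functions, and opens / closes the Bloomberg connection
# around the wrapped call.
@with_bloomberg
def sample_ref(tickers, flds, **kwargs):
    con, _ = create_connection()
    return [con.ref(tickers=tickers, flds=flds)]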
|
def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):
"""
Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new)
"""
if _CON_SYM_ in globals():
if not isinstance(globals()[_CON_SYM_], pdblp.BCon):
del globals()[_CON_SYM_]
if (_CON_SYM_ in globals()) and (not restart):
con = globals()[_CON_SYM_]
if getattr(con, '_session').start(): con.start()
return con, False
else:
con = pdblp.BCon(port=port, timeout=timeout)
globals()[_CON_SYM_] = con
con.start()
return con, True
|
def delete_connection():
"""
Stop and destroy Bloomberg connection
"""
if _CON_SYM_ in globals():
con = globals().pop(_CON_SYM_)
if not getattr(con, '_session').start(): con.stop()
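
# Usage sketch (assumes a running Bloomberg terminal and the pdblp package):
# the boolean returned by create_connection tells the caller whether it owns
# the connection and should tear it down afterwards, mirroring how the
# decorator above uses it.
con, new = create_connection()
try:
    pass  # run queries against `con` here
finally:
    if new: delete_connection()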
|
def bdp_bds_cache(func, tickers, flds, **kwargs) -> ToQuery:
"""
Find cached `BDP` / `BDS` queries
Args:
func: function name - bdp or bds
tickers: tickers
flds: fields
**kwargs: other kwargs
Returns:
ToQuery(ticker, flds, kwargs)
"""
cache_data = []
log_level = kwargs.get('log', logs.LOG_LEVEL)
logger = logs.get_logger(bdp_bds_cache, level=log_level)
kwargs['has_date'] = kwargs.pop('has_date', func == 'bds')
kwargs['cache'] = kwargs.get('cache', True)
tickers = utils.flatten(tickers)
flds = utils.flatten(flds)
loaded = pd.DataFrame(data=0, index=tickers, columns=flds)
for ticker, fld in product(tickers, flds):
data_file = storage.ref_file(
ticker=ticker, fld=fld, ext='pkl', **{
k: v for k, v in kwargs.items() if k not in EXC_COLS
}
)
if not files.exists(data_file): continue
logger.debug(f'reading from {data_file} ...')
cache_data.append(pd.read_pickle(data_file))
loaded.loc[ticker, fld] = 1
to_qry = loaded.where(loaded == 0)\
.dropna(how='all', axis=1).dropna(how='all', axis=0)
return ToQuery(
tickers=to_qry.index.tolist(), flds=to_qry.columns.tolist(),
cached_data=cache_data
)
|
def parse_version(package):
"""
Parse versions
"""
init_file = f'{PACKAGE_ROOT}/{package}/__init__.py'
with open(init_file, 'r', encoding='utf-8') as f:
for line in f.readlines():
if '__version__' in line:
return line.split('=')[1].strip()[1:-1]
return ''
|
def parse_markdown():
"""
Parse markdown as description
"""
readme_file = f'{PACKAGE_ROOT}/README.md'
if path.exists(readme_file):
with open(readme_file, 'r', encoding='utf-8') as f:
long_description = f.read()
return long_description
|
def parse_description(markdown=True):
"""
Parse the description in the README file
"""
if markdown: return parse_markdown()
try:
from pypandoc import convert
readme_file = f'{PACKAGE_ROOT}/docs/index.rst'
if not path.exists(readme_file):
raise ImportError
return convert(readme_file, 'rst')
except ImportError:
return parse_markdown()
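
# Sketch of how these helpers would feed setuptools in setup.py; the package
# name 'xbbg' and the metadata fields shown are assumptions, not the project's
# actual setup() call.
from setuptools import setup, find_packages

setup(
    name='xbbg',
    version=parse_version('xbbg'),
    long_description=parse_description(markdown=True),
    long_description_content_type='text/markdown',
    packages=find_packages(),
)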
|
def proc_ovrds(**kwargs):
"""
Bloomberg overrides
Args:
**kwargs: overrides
Returns:
list of tuples
Examples:
>>> proc_ovrds(DVD_Start_Dt='20180101')
[('DVD_Start_Dt', '20180101')]
>>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)
[('DVD_Start_Dt', '20180101')]
"""
return [
(k, v) for k, v in kwargs.items()
if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS
]
|
def proc_elms(**kwargs) -> list:
"""
Bloomberg overrides for elements
Args:
**kwargs: overrides
Returns:
list of tuples
Examples:
>>> proc_elms(PerAdj='A', Per='W')
[('periodicityAdjustment', 'ACTUAL'), ('periodicitySelection', 'WEEKLY')]
>>> proc_elms(Days='A', Fill='B')
[('nonTradingDayFillOption', 'ALL_CALENDAR_DAYS'), ('nonTradingDayFillMethod', 'NIL_VALUE')]
>>> proc_elms(CshAdjNormal=False, CshAdjAbnormal=True)
[('adjustmentNormal', False), ('adjustmentAbnormal', True)]
>>> proc_elms(Per='W', Quote='Average', start_date='2018-01-10')
[('periodicitySelection', 'WEEKLY'), ('overrideOption', 'OVERRIDE_OPTION_GPA')]
>>> proc_elms(QuoteType='Y')
[('pricingOption', 'PRICING_OPTION_YIELD')]
>>> proc_elms(QuoteType='Y', cache=True)
[('pricingOption', 'PRICING_OPTION_YIELD')]
"""
return [
(ELEM_KEYS.get(k, k), ELEM_VALS.get(ELEM_KEYS.get(k, k), dict()).get(v, v))
for k, v in kwargs.items()
if (k in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()))
and (k not in PRSV_COLS)
]
|
def format_earning(data: pd.DataFrame, header: pd.DataFrame) -> pd.DataFrame:
"""
Standardize earnings output and add percentages within each block
Args:
data: earning data block
header: earning headers
Returns:
pd.DataFrame
Examples:
>>> format_earning(
... data=pd.read_pickle('xbbg/tests/data/sample_earning.pkl'),
... header=pd.read_pickle('xbbg/tests/data/sample_earning_header.pkl')
... ).round(2)
level fy2017 fy2017_pct
Asia-Pacific 1.0 3540.0 66.43
China 2.0 1747.0 49.35
Japan 2.0 1242.0 35.08
Singapore 2.0 551.0 15.56
United States 1.0 1364.0 25.60
Europe 1.0 263.0 4.94
Other Countries 1.0 162.0 3.04
"""
if data.dropna(subset=['value']).empty: return pd.DataFrame()
res = pd.concat([
grp.loc[:, ['value']].set_index(header.value)
for _, grp in data.groupby(data.position)
], axis=1)
res.index.name = None
res.columns = res.iloc[0]
res = res.iloc[1:].transpose().reset_index().apply(
pd.to_numeric, downcast='float', errors='ignore'
)
res.rename(
columns=lambda vv: '_'.join(vv.lower().split()).replace('fy_', 'fy'),
inplace=True,
)
years = res.columns[res.columns.str.startswith('fy')]
lvl_1 = res.level == 1
for yr in years:
res.loc[:, yr] = res.loc[:, yr].round(1)
pct = f'{yr}_pct'
res.loc[:, pct] = 0.
res.loc[lvl_1, pct] = res.loc[lvl_1, pct].astype(float).round(1)
res.loc[lvl_1, pct] = res.loc[lvl_1, yr] / res.loc[lvl_1, yr].sum() * 100
sub_pct = []
for _, snap in res[::-1].iterrows():
if snap.level > 2: continue
if snap.level == 1:
if len(sub_pct) == 0: continue
sub = pd.concat(sub_pct, axis=1).transpose()
res.loc[sub.index, pct] = \
res.loc[sub.index, yr] / res.loc[sub.index, yr].sum() * 100
sub_pct = []
if snap.level == 2: sub_pct.append(snap)
res.set_index('segment_name', inplace=True)
res.index.name = None
return res
|
def format_output(data: pd.DataFrame, source, col_maps=None) -> pd.DataFrame:
"""
Format `pdblp` outputs to column-based results
Args:
data: `pdblp` result
source: `bdp` or `bds`
col_maps: rename columns with these mappings
Returns:
pd.DataFrame
Examples:
>>> format_output(
... data=pd.read_pickle('xbbg/tests/data/sample_bdp.pkl'),
... source='bdp'
... ).reset_index()
ticker name
0 QQQ US Equity INVESCO QQQ TRUST SERIES 1
1 SPY US Equity SPDR S&P 500 ETF TRUST
>>> format_output(
... data=pd.read_pickle('xbbg/tests/data/sample_dvd.pkl'),
... source='bds', col_maps={'Dividend Frequency': 'dvd_freq'}
... ).loc[:, ['ex_date', 'dividend_amount', 'dvd_freq']].reset_index()
ticker ex_date dividend_amount dvd_freq
0 C US Equity 2018-02-02 0.32 Quarter
"""
if data.empty: return pd.DataFrame()
if source == 'bdp': req_cols = ['ticker', 'field', 'value']
else: req_cols = ['ticker', 'field', 'name', 'value', 'position']
if any(col not in data for col in req_cols): return pd.DataFrame()
if data.dropna(subset=['value']).empty: return pd.DataFrame()
if source == 'bdp':
res = pd.DataFrame(pd.concat([
pd.Series({**{'ticker': t}, **grp.set_index('field').value.to_dict()})
for t, grp in data.groupby('ticker')
], axis=1, sort=False)).transpose().set_index('ticker')
else:
res = pd.DataFrame(pd.concat([
grp.loc[:, ['name', 'value']].set_index('name')
.transpose().reset_index(drop=True).assign(ticker=t)
for (t, _), grp in data.groupby(['ticker', 'position'])
], sort=False)).reset_index(drop=True).set_index('ticker')
res.columns.name = None
if col_maps is None: col_maps = dict()
return res.rename(
columns=lambda vv: col_maps.get(
vv, vv.lower().replace(' ', '_').replace('-', '_')
)
).apply(pd.to_numeric, errors='ignore', downcast='float')
|
def format_intraday(data: pd.DataFrame, ticker, **kwargs) -> pd.DataFrame:
"""
Format intraday data
Args:
data: pd.DataFrame from bdib
ticker: ticker
Returns:
pd.DataFrame
Examples:
>>> format_intraday(
... data=pd.read_parquet('xbbg/tests/data/sample_bdib.parq'),
... ticker='SPY US Equity',
... ).xs('close', axis=1, level=1, drop_level=False)
ticker SPY US Equity
field close
2018-12-28 09:30:00-05:00 249.67
2018-12-28 09:31:00-05:00 249.54
2018-12-28 09:32:00-05:00 249.22
2018-12-28 09:33:00-05:00 249.01
2018-12-28 09:34:00-05:00 248.86
>>> format_intraday(
... data=pd.read_parquet('xbbg/tests/data/sample_bdib.parq'),
... ticker='SPY US Equity', price_only=True
... )
ticker SPY US Equity
2018-12-28 09:30:00-05:00 249.67
2018-12-28 09:31:00-05:00 249.54
2018-12-28 09:32:00-05:00 249.22
2018-12-28 09:33:00-05:00 249.01
2018-12-28 09:34:00-05:00 248.86
"""
if data.empty: return pd.DataFrame()
data.columns = pd.MultiIndex.from_product([
[ticker], data.rename(columns=dict(numEvents='num_trds')).columns
], names=['ticker', 'field'])
data.index.name = None
if kwargs.get('price_only', False):
kw_xs = dict(axis=1, level=1)
close = data.xs('close', **kw_xs)
volume = data.xs('volume', **kw_xs).iloc[:, 0]
return close.loc[volume > 0] if volume.min() > 0 else close
else: return data
|
def info_qry(tickers, flds) -> str:
"""
Logging info for given tickers and fields
Args:
tickers: tickers
flds: fields
Returns:
str
Examples:
>>> print(info_qry(
... tickers=['NVDA US Equity'], flds=['Name', 'Security_Name']
... ))
tickers: ['NVDA US Equity']
fields: ['Name', 'Security_Name']
"""
full_list = '\n'.join([f'tickers: {tickers[:8]}'] + [
f' {tickers[n:(n + 8)]}' for n in range(8, len(tickers), 8)
])
return f'{full_list}\nfields: {flds}'
|
def bdp(tickers, flds, **kwargs):
"""
Bloomberg reference data
Args:
tickers: tickers
flds: fields to query
**kwargs: bbg overrides
Returns:
pd.DataFrame
Examples:
>>> bdp('IQ US Equity', 'Crncy', raw=True)
ticker field value
0 IQ US Equity Crncy USD
>>> bdp('IQ US Equity', 'Crncy').reset_index()
ticker crncy
0 IQ US Equity USD
"""
logger = logs.get_logger(bdp, level=kwargs.pop('log', logs.LOG_LEVEL))
con, _ = create_connection()
ovrds = assist.proc_ovrds(**kwargs)
logger.info(
f'loading reference data from Bloomberg:\n'
f'{assist.info_qry(tickers=tickers, flds=flds)}'
)
data = con.ref(tickers=tickers, flds=flds, ovrds=ovrds)
if not kwargs.get('cache', False): return [data]
qry_data = []
for r, snap in data.iterrows():
subset = [r]
data_file = storage.ref_file(
ticker=snap.ticker, fld=snap.field, ext='pkl', **kwargs
)
if data_file:
if not files.exists(data_file): qry_data.append(data.iloc[subset])
files.create_folder(data_file, is_file=True)
data.iloc[subset].to_pickle(data_file)
return qry_data
|
def bds(tickers, flds, **kwargs):
"""
Bloomberg block data
Args:
tickers: ticker(s)
flds: field(s)
**kwargs: other overrides for query
-> raw: raw output from `pdblp` library, default False
Returns:
pd.DataFrame: block data
Examples:
>>> import os
>>>
>>> pd.options.display.width = 120
>>> s_dt, e_dt = '20180301', '20181031'
>>> dvd = bds(
... 'NVDA US Equity', 'DVD_Hist_All',
... DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt, raw=True,
... )
>>> dvd.loc[:, ['ticker', 'name', 'value']].head(8)
ticker name value
0 NVDA US Equity Declared Date 2018-08-16
1 NVDA US Equity Ex-Date 2018-08-29
2 NVDA US Equity Record Date 2018-08-30
3 NVDA US Equity Payable Date 2018-09-21
4 NVDA US Equity Dividend Amount 0.15
5 NVDA US Equity Dividend Frequency Quarter
6 NVDA US Equity Dividend Type Regular Cash
7 NVDA US Equity Declared Date 2018-05-10
>>> dvd = bds(
... 'NVDA US Equity', 'DVD_Hist_All',
... DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt,
... )
>>> dvd.reset_index().loc[:, ['ticker', 'ex_date', 'dividend_amount']]
ticker ex_date dividend_amount
0 NVDA US Equity 2018-08-29 0.15
1 NVDA US Equity 2018-05-23 0.15
>>> if not os.environ.get('BBG_ROOT', ''):
... os.environ['BBG_ROOT'] = f'{files.abspath(__file__, 1)}/tests/data'
>>> idx_kw = dict(End_Dt='20181220', cache=True)
>>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)
>>> idx_wt.round(2).tail().reset_index(drop=True)
index_member percent_weight
0 V UN 3.82
1 VZ UN 1.63
2 WBA UW 2.06
3 WMT UN 2.59
4 XOM UN 2.04
>>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)
>>> idx_wt.round(2).head().reset_index(drop=True)
index_member percent_weight
0 AAPL UW 4.65
1 AXP UN 2.84
2 BA UN 9.29
3 CAT UN 3.61
4 CSCO UW 1.26
"""
logger = logs.get_logger(bds, level=kwargs.pop('log', logs.LOG_LEVEL))
con, _ = create_connection()
ovrds = assist.proc_ovrds(**kwargs)
logger.info(
f'loading block data from Bloomberg:\n'
f'{assist.info_qry(tickers=tickers, flds=flds)}'
)
data = con.bulkref(tickers=tickers, flds=flds, ovrds=ovrds)
if not kwargs.get('cache', False): return [data]
qry_data = []
for (ticker, fld), grp in data.groupby(['ticker', 'field']):
data_file = storage.ref_file(
ticker=ticker, fld=fld, ext='pkl',
has_date=kwargs.get('has_date', True), **kwargs
)
if data_file:
if not files.exists(data_file): qry_data.append(grp)
files.create_folder(data_file, is_file=True)
grp.reset_index(drop=True).to_pickle(data_file)
return qry_data
|
def bdh(
tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
"""
Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
Case 2: `split` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
Examples:
>>> res = bdh(
... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
... start_date='2018-02-05', end_date='2018-02-07',
... ).round(2).transpose()
>>> res.index.name = None
>>> res.columns.name = None
>>> res
2018-02-05 2018-02-06 2018-02-07
VIX Index High 38.80 50.30 31.64
Low 16.80 22.42 21.17
Last_Price 37.32 29.98 27.73
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140605', end_date='20140610', adjust='-'
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-05 647.35
2014-06-06 645.57
2014-06-09 93.70
2014-06-10 94.25
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140606', end_date='20140609',
... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-06 645.57
2014-06-09 93.70
"""
logger = logs.get_logger(bdh, level=kwargs.pop('log', logs.LOG_LEVEL))
# Dividend adjustments
if isinstance(adjust, str) and adjust:
if adjust == 'all':
kwargs['CshAdjNormal'] = True
kwargs['CshAdjAbnormal'] = True
kwargs['CapChg'] = True
else:
kwargs['CshAdjNormal'] = 'normal' in adjust or 'dvd' in adjust
kwargs['CshAdjAbnormal'] = 'abn' in adjust or 'dvd' in adjust
kwargs['CapChg'] = 'split' in adjust
con, _ = create_connection()
elms = assist.proc_elms(**kwargs)
ovrds = assist.proc_ovrds(**kwargs)
if isinstance(tickers, str): tickers = [tickers]
if flds is None: flds = ['Last_Price']
if isinstance(flds, str): flds = [flds]
e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
if start_date is None:
start_date = pd.Timestamp(e_dt) - relativedelta(months=3)
s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
logger.info(
f'loading historical data from Bloomberg:\n'
f'{assist.info_qry(tickers=tickers, flds=flds)}'
)
logger.debug(
f'\nflds={flds}\nelms={elms}\novrds={ovrds}\nstart_date={s_dt}\nend_date={e_dt}'
)
res = con.bdh(
tickers=tickers, flds=flds, elms=elms, ovrds=ovrds, start_date=s_dt, end_date=e_dt
)
res.index.name = None
if (len(flds) == 1) and kwargs.get('keep_one', False):
return res.xs(flds[0], axis=1, level=1)
return res
|
def bdib(ticker, dt, typ='TRADE', **kwargs) -> pd.DataFrame:
"""
Bloomberg intraday bar data
Args:
ticker: ticker name
dt: date to download
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
**kwargs:
batch: whether this is a batch process for downloading data
log: level of logs
Returns:
pd.DataFrame
"""
from xbbg.core import missing
logger = logs.get_logger(bdib, level=kwargs.pop('log', logs.LOG_LEVEL))
t_1 = pd.Timestamp('today').date() - pd.Timedelta('1D')
whole_day = pd.Timestamp(dt).date() < t_1
batch = kwargs.pop('batch', False)
if (not whole_day) and batch:
logger.warning(f'querying date {dt} is too close to today, ignoring download ...')
return pd.DataFrame()
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
asset = ticker.split()[-1]
info_log = f'{ticker} / {cur_dt} / {typ}'
if asset in ['Equity', 'Curncy', 'Index', 'Comdty']:
exch = const.exch_info(ticker=ticker)
if exch.empty: return pd.DataFrame()
else:
logger.error(f'unknown asset type: {asset}')
return pd.DataFrame()
time_fmt = '%Y-%m-%dT%H:%M:%S'
time_idx = pd.DatetimeIndex([
f'{cur_dt} {exch.allday[0]}', f'{cur_dt} {exch.allday[-1]}']
).tz_localize(exch.tz).tz_convert(DEFAULT_TZ).tz_convert('UTC')
if time_idx[0] > time_idx[1]: time_idx -= pd.TimedeltaIndex(['1D', '0D'])
q_tckr = ticker
if exch.get('is_fut', False):
if 'freq' not in exch:
logger.error(f'[freq] missing in info for {info_log} ...')
is_sprd = exch.get('has_sprd', False) and (len(ticker[:-1]) != exch['tickers'][0])
if not is_sprd:
q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch['freq'])
if q_tckr == '':
logger.error(f'cannot find futures ticker for {ticker} ...')
return pd.DataFrame()
info_log = f'{q_tckr} / {cur_dt} / {typ}'
miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='bdib')
cur_miss = missing.current_missing(**miss_kw)
if cur_miss >= 2:
if batch: return pd.DataFrame()
logger.info(f'{cur_miss} trials with no data {info_log}')
return pd.DataFrame()
logger.info(f'loading data from Bloomberg: {info_log} ...')
con, _ = create_connection()
try:
data = con.bdib(
ticker=q_tckr, event_type=typ, interval=1,
start_datetime=time_idx[0].strftime(time_fmt),
end_datetime=time_idx[1].strftime(time_fmt),
)
except KeyError:
# Ignores missing data errors from pdblp library
# Warning msg will be displayed later
data = pd.DataFrame()
if not isinstance(data, pd.DataFrame):
raise ValueError(f'unknown output format: {type(data)}')
if data.empty:
logger.warning(f'no data for {info_log} ...')
missing.update_missing(**miss_kw)
return pd.DataFrame()
data = data.tz_localize('UTC').tz_convert(exch.tz)
storage.save_intraday(data=data, ticker=ticker, dt=dt, typ=typ)
return pd.DataFrame() if batch else assist.format_intraday(data=data, ticker=ticker)
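
# Usage sketch (requires a live Bloomberg session and BBG_ROOT pointing at a
# writable cache): intraday bars come back with a (ticker, field) column
# MultiIndex, as produced by format_intraday above; the ticker and date are
# illustrative only.
bars = bdib(ticker='SPY US Equity', dt='2018-12-28')
if not bars.empty:
    print(bars['SPY US Equity'][['close', 'volume']].head())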
|
def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:
"""
Bloomberg intraday bar data within market session
Args:
ticker: ticker
dt: date
session: examples include
day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
**kwargs:
ref: reference ticker or exchange for timezone
keep_tz: whether to keep the original timezone when a reference ticker / exchange is given
start_time: start time
end_time: end time
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Returns:
pd.DataFrame
"""
from xbbg.core import intervals
cur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('typ', 'TRADE'))
if cur_data.empty: return pd.DataFrame()
fmt = '%H:%M:%S'
ss = intervals.SessNA
ref = kwargs.get('ref', None)
exch = pd.Series() if ref is None else const.exch_info(ticker=ref)
if session: ss = intervals.get_interval(
ticker=kwargs.get('ref', ticker), session=session
)
start_time = kwargs.get('start_time', None)
end_time = kwargs.get('end_time', None)
if ss != intervals.SessNA:
start_time = pd.Timestamp(ss.start_time).strftime(fmt)
end_time = pd.Timestamp(ss.end_time).strftime(fmt)
if start_time and end_time:
kw = dict(start_time=start_time, end_time=end_time)
if not exch.empty:
cur_tz = cur_data.index.tz
res = cur_data.tz_convert(exch.tz).between_time(**kw)
if kwargs.get('keep_tz', False):
res = res.tz_convert(cur_tz)
return pd.DataFrame(res)
return pd.DataFrame(cur_data.between_time(**kw))
return cur_data
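
# Usage sketch: restrict the same bars to the first 30 minutes of the regular
# session (session names follow get_interval further below, e.g. 'day_open_30');
# the ticker and date are illustrative only.
opening = intraday(ticker='SPY US Equity', dt='2018-12-28', session='day_open_30')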
|
def earning(
ticker, by='Geo', typ='Revenue', ccy=None, level=None, **kwargs
) -> pd.DataFrame:
"""
Earning exposures by Geo or Products
Args:
ticker: ticker name
by: [G(eo), P(roduct)]
typ: type of earnings, starts with `PG_` in Bloomberg FLDS - default `Revenue`
ccy: currency of earnings
level: hierarchy level of earnings
Returns:
pd.DataFrame
Examples:
>>> data = earning('AMD US Equity', Eqy_Fund_Year=2017, Number_Of_Periods=1)
>>> data.round(2)
level fy2017 fy2017_pct
Asia-Pacific 1.0 3540.0 66.43
China 2.0 1747.0 49.35
Japan 2.0 1242.0 35.08
Singapore 2.0 551.0 15.56
United States 1.0 1364.0 25.60
Europe 1.0 263.0 4.94
Other Countries 1.0 162.0 3.04
"""
ovrd = 'G' if by[0].upper() == 'G' else 'P'
new_kw = dict(raw=True, Product_Geo_Override=ovrd)
header = bds(tickers=ticker, flds='PG_Bulk_Header', **new_kw, **kwargs)
if ccy: kwargs['Eqy_Fund_Crncy'] = ccy
if level: kwargs['PG_Hierarchy_Level'] = level
data = bds(tickers=ticker, flds=f'PG_{typ}', **new_kw, **kwargs)
return assist.format_earning(data=data, header=header)
|
def dividend(
tickers, typ='all', start_date=None, end_date=None, **kwargs
) -> pd.DataFrame:
"""
Bloomberg dividend / split history
Args:
tickers: list of tickers
typ: type of dividend / split history
`all`: `DVD_Hist_All`
`dvd`: `DVD_Hist`
`split`: `Eqy_DVD_Hist_Splits`
`gross`: `Eqy_DVD_Hist_Gross`
`adjust`: `Eqy_DVD_Adjust_Fact`
`adj_fund`: `Eqy_DVD_Adj_Fund`
`with_amt`: `DVD_Hist_All_with_Amt_Status`
`dvd_amt`: `DVD_Hist_with_Amt_Status`
`gross_amt`: `DVD_Hist_Gross_with_Amt_Stat`
`projected`: `BDVD_Pr_Ex_Dts_DVD_Amts_w_Ann`
start_date: start date
end_date: end date
**kwargs: overrides
Returns:
pd.DataFrame
Examples:
>>> res = dividend(
... tickers=['C US Equity', 'NVDA US Equity', 'MS US Equity'],
... start_date='2018-01-01', end_date='2018-05-01'
... )
>>> res.index.name = None
>>> res.loc[:, ['ex_date', 'rec_date', 'dvd_amt']].round(2)
ex_date rec_date dvd_amt
C US Equity 2018-02-02 2018-02-05 0.32
MS US Equity 2018-04-27 2018-04-30 0.25
MS US Equity 2018-01-30 2018-01-31 0.25
NVDA US Equity 2018-02-22 2018-02-23 0.15
"""
if isinstance(tickers, str): tickers = [tickers]
tickers = [t for t in tickers if ('Equity' in t) and ('=' not in t)]
fld = {
'all': 'DVD_Hist_All', 'dvd': 'DVD_Hist',
'split': 'Eqy_DVD_Hist_Splits', 'gross': 'Eqy_DVD_Hist_Gross',
'adjust': 'Eqy_DVD_Adjust_Fact', 'adj_fund': 'Eqy_DVD_Adj_Fund',
'with_amt': 'DVD_Hist_All_with_Amt_Status',
'dvd_amt': 'DVD_Hist_with_Amt_Status',
'gross_amt': 'DVD_Hist_Gross_with_Amt_Stat',
'projected': 'BDVD_Pr_Ex_Dts_DVD_Amts_w_Ann',
}.get(typ, typ)
if (fld == 'Eqy_DVD_Adjust_Fact') and ('Corporate_Actions_Filter' not in kwargs):
kwargs['Corporate_Actions_Filter'] = 'NORMAL_CASH|ABNORMAL_CASH|CAPITAL_CHANGE'
if fld in [
'DVD_Hist_All', 'DVD_Hist', 'Eqy_DVD_Hist_Gross',
'DVD_Hist_All_with_Amt_Status', 'DVD_Hist_with_Amt_Status',
]:
if start_date: kwargs['DVD_Start_Dt'] = utils.fmt_dt(start_date, fmt='%Y%m%d')
if end_date: kwargs['DVD_End_Dt'] = utils.fmt_dt(end_date, fmt='%Y%m%d')
kwargs['col_maps'] = {
'Declared Date': 'dec_date', 'Ex-Date': 'ex_date',
'Record Date': 'rec_date', 'Payable Date': 'pay_date',
'Dividend Amount': 'dvd_amt', 'Dividend Frequency': 'dvd_freq',
'Dividend Type': 'dvd_type', 'Amount Status': 'amt_status',
'Adjustment Date': 'adj_date', 'Adjustment Factor': 'adj_factor',
'Adjustment Factor Operator Type': 'adj_op',
'Adjustment Factor Flag': 'adj_flag',
'Amount Per Share': 'amt_ps', 'Projected/Confirmed': 'category',
}
return bds(tickers=tickers, flds=fld, raw=False, **kwargs)
|
def active_futures(ticker: str, dt) -> str:
"""
Active futures contract
Args:
ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.
dt: date
Returns:
str: ticker name
"""
t_info = ticker.split()
prefix, asset = ' '.join(t_info[:-1]), t_info[-1]
info = const.market_info(f'{prefix[:-1]}1 {asset}')
f1, f2 = f'{prefix[:-1]}1 {asset}', f'{prefix[:-1]}2 {asset}'
fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['freq'])
fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['freq'])
fut_tk = bdp(tickers=[fut_1, fut_2], flds='Last_Tradeable_Dt', cache=True)
if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[0]).month: return fut_1
d1 = bdib(ticker=f1, dt=dt)
d2 = bdib(ticker=f2, dt=dt)
return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2
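
# Usage sketch (requires Bloomberg data): resolve the active E-mini S&P 500
# contract for a given date from the generic chain; 'ESA Index' follows the
# ticker convention described in the docstring.
act = active_futures('ESA Index', dt='2018-09-10')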
|
def fut_ticker(gen_ticker: str, dt, freq: str, log=logs.LOG_LEVEL) -> str:
"""
Get proper ticker from generic ticker
Args:
gen_ticker: generic ticker
dt: date
freq: futures contract frequency
log: level of logs
Returns:
str: exact futures ticker
"""
logger = logs.get_logger(fut_ticker, level=log)
dt = pd.Timestamp(dt)
t_info = gen_ticker.split()
asset = t_info[-1]
if asset in ['Index', 'Curncy', 'Comdty']:
ticker = ' '.join(t_info[:-1])
prefix, idx, postfix = ticker[:-1], int(ticker[-1]) - 1, asset
elif asset == 'Equity':
ticker = t_info[0]
prefix, idx, postfix = ticker[:-1], int(ticker[-1]) - 1, ' '.join(t_info[1:])
else:
logger.error(f'unknown asset type for ticker: {gen_ticker}')
return ''
month_ext = 4 if asset == 'Comdty' else 2
months = pd.date_range(start=dt, periods=max(idx + month_ext, 3), freq=freq)
logger.debug(f'pulling expiry dates for months: {months}')
def to_fut(month):
return prefix + const.Futures[month.strftime('%b')] + \
month.strftime('%y')[-1] + ' ' + postfix
fut = [to_fut(m) for m in months]
logger.debug(f'trying futures: {fut}')
# noinspection PyBroadException
try:
fut_matu = bdp(tickers=fut, flds='last_tradeable_dt', cache=True)
except Exception as e1:
logger.error(f'error downloading futures contracts (1st trial) {e1}:\n{fut}')
# noinspection PyBroadException
try:
fut = fut[:-1]
logger.debug(f'trying futures (2nd trial): {fut}')
fut_matu = bdp(tickers=fut, flds='last_tradeable_dt', cache=True)
except Exception as e2:
logger.error(f'error downloading futures contracts (2nd trial) {e2}:\n{fut}')
return ''
sub_fut = fut_matu[pd.DatetimeIndex(fut_matu.last_tradeable_dt) > dt]
logger.debug(f'futures full chain:\n{fut_matu.to_string()}')
logger.debug(f'getting index {idx} from:\n{sub_fut.to_string()}')
return sub_fut.index.values[idx]
|
def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame:
"""
Check exchange hours vs local hours
Args:
tickers: list of tickers
tz_exch: exchange timezone
tz_loc: local timezone
Returns:
Local and exchange hours
"""
cols = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD']
con, _ = create_connection()
hours = con.ref(tickers=tickers, flds=cols)
cur_dt = pd.Timestamp('today').strftime('%Y-%m-%d ')
hours.loc[:, 'local'] = hours.value.astype(str).str[:-3]
hours.loc[:, 'exch'] = pd.DatetimeIndex(
cur_dt + hours.value.astype(str)
).tz_localize(tz_loc).tz_convert(tz_exch).strftime('%H:%M')
hours = pd.concat([
hours.set_index(['ticker', 'field']).exch.unstack().loc[:, cols],
hours.set_index(['ticker', 'field']).local.unstack().loc[:, cols],
], axis=1)
hours.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End']
return hours
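
# Usage sketch (requires a Bloomberg connection): compare exchange trading
# hours in Tokyo with their New York equivalents for a Japanese ticker.
hours = check_hours(['7267 JT Equity'], tz_exch='Asia/Tokyo', tz_loc='America/New_York')
print(hours)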
|
def hist_file(ticker: str, dt, typ='TRADE') -> str:
"""
Data file location for Bloomberg historical data
Args:
ticker: ticker name
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Returns:
file location
Examples:
>>> os.environ['BBG_ROOT'] = ''
>>> hist_file(ticker='ES1 Index', dt='2018-08-01') == ''
True
>>> os.environ['BBG_ROOT'] = '/data/bbg'
>>> hist_file(ticker='ES1 Index', dt='2018-08-01')
'/data/bbg/Index/ES1 Index/TRADE/2018-08-01.parq'
"""
data_path = os.environ.get(assist.BBG_ROOT, '').replace('\\', '/')
if not data_path: return ''
asset = ticker.split()[-1]
proper_ticker = ticker.replace('/', '_')
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
return f'{data_path}/{asset}/{proper_ticker}/{typ}/{cur_dt}.parq'
|
def ref_file(
ticker: str, fld: str, has_date=False, cache=False, ext='parq', **kwargs
) -> str:
"""
Data file location for Bloomberg reference data
Args:
ticker: ticker name
fld: field
has_date: whether to add the current date to the data file name
cache: whether to use cached data; when has_date is True, the latest cached file is loaded
ext: file extension
**kwargs: other overrides passed to ref function
Returns:
file location
Examples:
>>> import shutil
>>>
>>> os.environ['BBG_ROOT'] = ''
>>> ref_file('BLT LN Equity', fld='Crncy') == ''
True
>>> os.environ['BBG_ROOT'] = '/data/bbg'
>>> ref_file('BLT LN Equity', fld='Crncy', cache=True)
'/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq'
>>> ref_file('BLT LN Equity', fld='Crncy')
''
>>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ)
>>> ref_file(
... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True,
... ).replace(cur_dt, '[cur_date]')
'/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq'
>>> ref_file(
... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True,
... cache=True, DVD_Start_Dt='20180101',
... ).replace(cur_dt, '[cur_date]')[:-5]
'/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101'
>>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
>>> root_path = 'xbbg/tests/data'
>>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All'
>>> os.environ['BBG_ROOT'] = root_path
>>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file)
>>> files.create_folder(sub_path)
>>> sample in shutil.copy(f'{root_path}/{sample}', sub_path)
True
>>> new_file = ref_file(
... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
... has_date=True, cache=True, ext='pkl'
... )
>>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl'
True
>>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
>>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file])
>>> updated_file = old_full.replace('2018-11-02', cur_dt)
>>> updated_file in shutil.copy(old_full, updated_file)
True
>>> exist_file = ref_file(
... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
... has_date=True, cache=True, ext='pkl'
... )
>>> exist_file == updated_file
False
>>> exist_file = ref_file(
... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
... DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl'
... )
>>> exist_file == updated_file
True
"""
data_path = os.environ.get(assist.BBG_ROOT, '').replace('\\', '/')
if (not data_path) or (not cache): return ''
proper_ticker = ticker.replace('/', '_')
cache_days = kwargs.pop('cache_days', 10)
root = f'{data_path}/{ticker.split()[-1]}/{proper_ticker}/{fld}'
if len(kwargs) > 0: info = utils.to_str(kwargs)[1:-1].replace('|', '_')
else: info = 'ovrd=None'
# Check date info
if has_date:
cur_dt = utils.cur_time()
missing = f'{root}/asof={cur_dt}, {info}.{ext}'
to_find = re.compile(rf'{root}/asof=(.*), {info}\.{ext}')
cur_files = list(filter(to_find.match, sorted(
files.all_files(path_name=root, keyword=info, ext=ext)
)))
if len(cur_files) > 0:
upd_dt = to_find.match(cur_files[-1]).group(1)
diff = pd.Timestamp('today') - pd.Timestamp(upd_dt)
if diff >= pd.Timedelta(days=cache_days): return missing
return sorted(cur_files)[-1]
else: return missing
else: return f'{root}/{info}.{ext}'
|
def save_intraday(data: pd.DataFrame, ticker: str, dt, typ='TRADE'):
"""
Check whether data for the day is complete and, if so, save it
Args:
data: data
ticker: ticker
dt: date
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Examples:
>>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
>>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
>>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
>>> # Invalid exchange
>>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
>>> # Invalid empty data
>>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
>>> # Invalid date - too close
>>> cur_dt = utils.cur_time()
>>> save_intraday(sample, 'AAPL US Equity', cur_dt)
"""
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
logger = logs.get_logger(save_intraday, level='debug')
info = f'{ticker} / {cur_dt} / {typ}'
data_file = hist_file(ticker=ticker, dt=dt, typ=typ)
if not data_file: return
if data.empty:
logger.warning(f'data is empty for {info} ...')
return
exch = const.exch_info(ticker=ticker)
if exch.empty: return
end_time = pd.Timestamp(
const.market_timing(ticker=ticker, dt=dt, timing='FINISHED')
).tz_localize(exch.tz)
now = pd.Timestamp('now', tz=exch.tz) - pd.Timedelta('1H')
if end_time > now:
logger.debug(f'skip saving because market close ({end_time}) is later than now - 1H ({now}) ...')
return
logger.info(f'saving data to {data_file} ...')
files.create_folder(data_file, is_file=True)
data.to_parquet(data_file)
|
def exch_info(ticker: str) -> pd.Series:
"""
Exchange info for given ticker
Args:
ticker: ticker or exchange
Returns:
pd.Series
Examples:
>>> exch_info('SPY US Equity')
tz America/New_York
allday [04:00, 20:00]
day [09:30, 16:00]
pre [04:00, 09:30]
post [16:01, 20:00]
dtype: object
>>> exch_info('ES1 Index')
tz America/New_York
allday [18:00, 17:00]
day [08:00, 17:00]
dtype: object
>>> exch_info('Z 1 Index')
tz Europe/London
allday [01:00, 21:00]
day [01:00, 21:00]
dtype: object
>>> exch_info('TESTTICKER Corp').empty
True
>>> exch_info('US')
tz America/New_York
allday [04:00, 20:00]
day [09:30, 16:00]
pre [04:00, 09:30]
post [16:01, 20:00]
dtype: object
"""
logger = logs.get_logger(exch_info, level='debug')
if ' ' not in ticker.strip():
ticker = f'XYZ {ticker.strip()} Equity'
info = param.load_info(cat='exch').get(
market_info(ticker=ticker).get('exch', ''), dict()
)
if ('allday' in info) and ('day' not in info):
info['day'] = info['allday']
if any(req not in info for req in ['tz', 'allday', 'day']):
logger.error(f'required exchange info cannot be found for {ticker} ...')
return pd.Series()
for ss in ValidSessions:
if ss not in info: continue
info[ss] = [param.to_hour(num=s) for s in info[ss]]
return pd.Series(info)
|
def market_info(ticker: str) -> dict:
"""
Get info for given market
Args:
ticker: Bloomberg full ticker
Returns:
dict
Examples:
>>> info = market_info('SHCOMP Index')
>>> info['exch']
'EquityChina'
>>> info = market_info('ICICIC=1 IS Equity')
>>> info['freq'], info['is_fut']
('M', True)
>>> info = market_info('INT1 Curncy')
>>> info['freq'], info['is_fut']
('M', True)
>>> info = market_info('CL1 Comdty')
>>> info['freq'], info['is_fut']
('M', True)
>>> # Wrong tickers
>>> market_info('C XX Equity')
{}
>>> market_info('XXX Comdty')
{}
>>> market_info('Bond_ISIN Corp')
{}
>>> market_info('XYZ Index')
{}
>>> market_info('XYZ Curncy')
{}
"""
t_info = ticker.split()
assets = param.load_info('assets')
# ========================== #
# Equity #
# ========================== #
if (t_info[-1] == 'Equity') and ('=' not in t_info[0]):
exch = t_info[-2]
for info in assets.get('Equity', [dict()]):
if 'exch_codes' not in info: continue
if exch in info['exch_codes']: return info
return dict()
# ============================ #
# Currency #
# ============================ #
if t_info[-1] == 'Curncy':
for info in assets.get('Curncy', [dict()]):
if 'tickers' not in info: continue
if (t_info[0].split('+')[0] in info['tickers']) or \
(t_info[0][-1].isdigit() and (t_info[0][:-1] in info['tickers'])):
return info
return dict()
if t_info[-1] == 'Comdty':
for info in assets.get('Comdty', [dict()]):
if 'tickers' not in info: continue
if t_info[0][:-1] in info['tickers']: return info
return dict()
# =================================== #
# Index / Futures #
# =================================== #
if (t_info[-1] == 'Index') or (
(t_info[-1] == 'Equity') and ('=' in t_info[0])
):
if t_info[-1] == 'Equity':
tck = t_info[0].split('=')[0]
else:
tck = ' '.join(t_info[:-1])
for info in assets.get('Index', [dict()]):
if 'tickers' not in info: continue
if (tck[:2] == 'UX') and ('UX' in info['tickers']): return info
if tck in info['tickers']:
if t_info[-1] == 'Equity': return info
if not info.get('is_fut', False): return info
if tck[:-1].rstrip() in info['tickers']:
if info.get('is_fut', False): return info
return dict()
if t_info[-1] == 'Corp':
for info in assets.get('Corp', [dict()]):
if 'ticker' not in info: continue
return dict()
|
def ccy_pair(local, base='USD') -> CurrencyPair:
"""
Currency pair info
Args:
local: local currency
base: base currency
Returns:
CurrencyPair
Examples:
>>> ccy_pair(local='HKD', base='USD')
CurrencyPair(ticker='HKD Curncy', factor=1.0, power=1)
>>> ccy_pair(local='GBp')
CurrencyPair(ticker='GBP Curncy', factor=100, power=-1)
>>> ccy_pair(local='USD', base='GBp')
CurrencyPair(ticker='GBP Curncy', factor=0.01, power=1)
>>> ccy_pair(local='XYZ', base='USD')
CurrencyPair(ticker='', factor=1.0, power=1)
>>> ccy_pair(local='GBP', base='GBp')
CurrencyPair(ticker='', factor=0.01, power=1)
>>> ccy_pair(local='GBp', base='GBP')
CurrencyPair(ticker='', factor=100.0, power=1)
"""
ccy_param = param.load_info(cat='ccy')
if f'{local}{base}' in ccy_param:
info = ccy_param[f'{local}{base}']
elif f'{base}{local}' in ccy_param:
info = ccy_param[f'{base}{local}']
info['factor'] = 1. / info.get('factor', 1.)
info['power'] = -info.get('power', 1)
elif base.lower() == local.lower():
info = dict(ticker='')
info['factor'] = 1.
if base[-1].lower() == base[-1]:
info['factor'] /= 100.
if local[-1].lower() == local[-1]:
info['factor'] *= 100.
else:
logger = logs.get_logger(ccy_pair)
logger.error(f'incorrect currency - local {local} / base {base}')
return CurrencyPair(ticker='', factor=1., power=1)
if 'factor' not in info: info['factor'] = 1.
if 'power' not in info: info['power'] = 1
return CurrencyPair(**info)
|
def market_timing(ticker, dt, timing='EOD', tz='local') -> str:
"""
Market close time for ticker
Args:
ticker: ticker name
dt: date
timing: [EOD (default), BOD]
tz: conversion to timezone
Returns:
str: date & time
Examples:
>>> market_timing('7267 JT Equity', dt='2018-09-10')
'2018-09-10 14:58'
>>> market_timing('7267 JT Equity', dt='2018-09-10', tz=timezone.TimeZone.NY)
'2018-09-10 01:58:00-04:00'
>>> market_timing('7267 JT Equity', dt='2018-01-10', tz='NY')
'2018-01-10 00:58:00-05:00'
>>> market_timing('7267 JT Equity', dt='2018-09-10', tz='SPX Index')
'2018-09-10 01:58:00-04:00'
>>> market_timing('8035 JT Equity', dt='2018-09-10', timing='BOD')
'2018-09-10 09:01'
>>> market_timing('Z 1 Index', dt='2018-09-10', timing='FINISHED')
'2018-09-10 21:00'
>>> market_timing('TESTTICKER Corp', dt='2018-09-10')
''
"""
logger = logs.get_logger(market_timing)
exch = pd.Series(exch_info(ticker=ticker))
if any(req not in exch.index for req in ['tz', 'allday', 'day']):
logger.error(f'required exchange info cannot be found for {ticker} ...')
return ''
mkt_time = {
'BOD': exch.day[0], 'FINISHED': exch.allday[-1]
}.get(timing, exch.day[-1])
cur_dt = pd.Timestamp(str(dt)).strftime('%Y-%m-%d')
if tz == 'local':
return f'{cur_dt} {mkt_time}'
return timezone.tz_convert(f'{cur_dt} {mkt_time}', to_tz=tz, from_tz=exch.tz)
|
def flatten(iterable, maps=None, unique=False) -> list:
"""
Flatten any array of items to list
Args:
iterable: any array or value
maps: map items to values
unique: drop duplicates
Returns:
list: flattened list
References:
https://stackoverflow.com/a/40857703/1332656
Examples:
>>> flatten('abc')
['abc']
>>> flatten(1)
[1]
>>> flatten(1.)
[1.0]
>>> flatten(['ab', 'cd', ['xy', 'zz']])
['ab', 'cd', 'xy', 'zz']
>>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})
['ab', '0x', 'zz']
"""
if iterable is None: return []
if maps is None: maps = dict()
if isinstance(iterable, (str, int, float)):
return [maps.get(iterable, iterable)]
else:
x = [maps.get(item, item) for item in _to_gen_(iterable)]
return list(set(x)) if unique else x
|
def _to_gen_(iterable):
"""
Recursively iterate lists and tuples
"""
from collections.abc import Iterable
for elm in iterable:
if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):
yield from flatten(elm)
else: yield elm
|
def cur_time(typ='date', tz=DEFAULT_TZ) -> (datetime.date, str):
"""
Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date') == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time') == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path') == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> cur_time(typ='') == cur_dt.date()
True
"""
dt = pd.Timestamp('now', tz=tz)
if typ == 'date': return dt.strftime('%Y-%m-%d')
if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S')
if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S')
if typ == 'raw': return dt
return dt.date()
|
def to_str(
data: dict, fmt='{key}={value}', sep=', ', public_only=True
) -> str:
"""
Convert dict to string
Args:
data: dict
fmt: how key and value being represented
sep: how pairs of key and value are separated
public_only: if display public members only
Returns:
str: string representation of dict
Examples:
>>> test_dict = dict(b=1, a=0, c=2, _d=3)
>>> to_str(test_dict)
'{b=1, a=0, c=2}'
>>> to_str(test_dict, sep='|')
'{b=1|a=0|c=2}'
>>> to_str(test_dict, public_only=False)
'{b=1, a=0, c=2, _d=3}'
"""
if public_only: keys = list(filter(lambda vv: vv[0] != '_', data.keys()))
else: keys = list(data.keys())
return '{' + sep.join([
to_str(data=v, fmt=fmt, sep=sep)
if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)
for k, v in data.items() if k in keys
]) + '}'
|
def load_module(full_path):
"""
Load module from full path
Args:
full_path: module full path name
Returns:
python module
References:
https://stackoverflow.com/a/67692/1332656
Examples:
>>> import os
>>>
>>> cur_file = os.path.abspath(__file__).replace('\\\\', '/')
>>> cur_path = '/'.join(cur_file.split('/')[:-1])
>>> load_module(f'{cur_path}/timezone.py').__name__
'timezone'
>>> load_module(f'{cur_path}/timezone.pyc')
Traceback (most recent call last):
ImportError: not a python file: timezone.pyc
"""
from importlib import util
file_name = full_path.replace('\\', '/').split('/')[-1]
if file_name[-3:] != '.py':
raise ImportError(f'not a python file: {file_name}')
module_name = file_name[:-3]
spec = util.spec_from_file_location(name=module_name, location=full_path)
module = util.module_from_spec(spec=spec)
spec.loader.exec_module(module=module)
return module
|
def load_info(cat):
"""
Load parameters for assets
Args:
cat: category
Returns:
dict
Examples:
>>> import pandas as pd
>>>
>>> assets = load_info(cat='assets')
>>> all(cat in assets for cat in ['Equity', 'Index', 'Curncy', 'Corp'])
True
>>> os.environ['BBG_PATH'] = ''
>>> exch = load_info(cat='exch')
>>> pd.Series(exch['EquityUS']).allday
[400, 2000]
>>> test_root = f'{PKG_PATH}/tests'
>>> os.environ['BBG_PATH'] = test_root
>>> ovrd_exch = load_info(cat='exch')
>>> # Somehow os.environ is not set properly in doctest environment
>>> ovrd_exch.update(_load_yaml_(f'{test_root}/markets/exch.yml'))
>>> pd.Series(ovrd_exch['EquityUS']).allday
[300, 2100]
"""
res = _load_yaml_(f'{PKG_PATH}/markets/{cat}.yml')
root = os.environ.get('BBG_ROOT', '').replace('\\', '/')
if not root: return res
for cat, ovrd in _load_yaml_(f'{root}/markets/{cat}.yml').items():
if isinstance(ovrd, dict):
if cat in res: res[cat].update(ovrd)
else: res[cat] = ovrd
if isinstance(ovrd, list) and isinstance(res[cat], list): res[cat] += ovrd
return res
|
def _load_yaml_(file_name):
"""
Load asset information from file
Args:
file_name: file name
Returns:
dict
"""
if not os.path.exists(file_name): return dict()
with open(file_name, 'r', encoding='utf-8') as fp:
return YAML().load(stream=fp)
|
def to_hour(num) -> str:
"""
Convert YAML input to hours
Args:
num: number in YAML file, e.g., 900, 1700, etc.
Returns:
str
Examples:
>>> to_hour(900)
'09:00'
>>> to_hour(1700)
'17:00'
"""
to_str = str(int(num))
return pd.Timestamp(f'{to_str[:-2]}:{to_str[-2:]}').strftime('%H:%M')
|
def abspath(cur_file, parent=0) -> str:
"""
Absolute path
Args:
cur_file: __file__ or file or path str
parent: level of parent to look for
Returns:
str
"""
file_path = os.path.abspath(cur_file).replace('\\', '/')
if os.path.isdir(file_path) and parent == 0: return file_path
adj = 1 - os.path.isdir(file_path)
return '/'.join(file_path.split('/')[:-(parent + adj)])
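
# Usage sketch: abspath resolves the directory containing a file, or climbs
# `parent` levels above it (used elsewhere as files.abspath(__file__, 1)).
here = abspath(__file__)         # directory of the current file
pkg_root = abspath(__file__, 1)  # one level up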
|
def create_folder(path_name: str, is_file=False):
"""
Make folder as well as all parent folders if they do not exist
Args:
path_name: full path name
is_file: whether input is name of file
"""
path_sep = path_name.replace('\\', '/').split('/')
for i in range(1, len(path_sep) + (0 if is_file else 1)):
cur_path = '/'.join(path_sep[:i])
if not os.path.exists(cur_path): os.mkdir(cur_path)
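
# Usage sketch: prepare the nested folders for a data file before writing it,
# mirroring how save_intraday calls files.create_folder(data_file, is_file=True);
# the path shown is illustrative only.
create_folder('/data/bbg/Equity/SPY US Equity/TRADE/2018-12-28.parq', is_file=True)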
|
def all_files(
path_name, keyword='', ext='', full_path=True,
has_date=False, date_fmt=DATE_FMT
) -> list:
"""
Search all files with criteria
Returned list will be sorted by last modified
Args:
path_name: full path name
keyword: keyword to search
ext: file extensions, split by ','
full_path: whether return full path (default True)
has_date: whether has date in file name (default False)
date_fmt: date format to check for has_date parameter
Returns:
list: all file names that fulfill the criteria
"""
if not os.path.exists(path=path_name): return []
path_name = path_name.replace('\\', '/')
if keyword or ext:
keyword = f'*{keyword}*' if keyword else '*'
if not ext: ext = '*'
files = sort_by_modified([
f.replace('\\', '/') for f in glob.iglob(f'{path_name}/{keyword}.{ext}')
if os.path.isfile(f) and (f.replace('\\', '/').split('/')[-1][0] != '~')
])
else:
files = sort_by_modified([
f'{path_name}/{f}' for f in os.listdir(path=path_name)
if os.path.isfile(f'{path_name}/{f}') and (f[0] != '~')
])
if has_date:
files = filter_by_dates(files, date_fmt=date_fmt)
return files if full_path else [f.split('/')[-1] for f in files]
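
# Usage sketch: list cached pickle files for one field, newest first; the path
# is illustrative and follows the layout produced by ref_file above.
cached = all_files('xbbg/tests/data/Equity/AAPL US Equity/DVD_Hist_All', ext='pkl')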
|
def all_folders(
path_name, keyword='', has_date=False, date_fmt=DATE_FMT
) -> list:
"""
Search all folders with criteria
Returned list will be sorted by last modified
Args:
path_name: full path name
keyword: keyword to search
has_date: whether has date in file name (default False)
date_fmt: date format to check for has_date parameter
Returns:
list: all folder names that fulfill the criteria
"""
if not os.path.exists(path=path_name): return []
path_name = path_name.replace('\\', '/')
if keyword:
folders = sort_by_modified([
f.replace('\\', '/') for f in glob.iglob(f'{path_name}/*{keyword}*')
if os.path.isdir(f) and (f.replace('\\', '/').split('/')[-1][0] != '~')
])
else:
folders = sort_by_modified([
f'{path_name}/{f}' for f in os.listdir(path=path_name)
if os.path.isdir(f'{path_name}/{f}') and (f[0] != '~')
])
if has_date:
folders = filter_by_dates(folders, date_fmt=date_fmt)
return folders
|
def sort_by_modified(files_or_folders: list) -> list:
"""
Sort files or folders by modified time
Args:
files_or_folders: list of files or folders
Returns:
list
"""
return sorted(files_or_folders, key=os.path.getmtime, reverse=True)
|
def filter_by_dates(files_or_folders: list, date_fmt=DATE_FMT) -> list:
"""
Filter files or folders by date pattern
Args:
files_or_folders: list of files or folders
date_fmt: date format
Returns:
list
"""
r = re.compile(f'.*{date_fmt}.*')
return list(filter(
lambda vv: r.match(vv.replace('\\', '/').split('/')[-1]) is not None,
files_or_folders,
))
|
def file_modified_time(file_name) -> pd.Timestamp:
"""
File modified time as a pandas Timestamp
Args:
file_name: file name
Returns:
pd.Timestamp
"""
return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))
|
def get_interval(ticker, session) -> Session:
"""
Get interval from defined session
Args:
ticker: ticker
session: session
Returns:
Session of start_time and end_time
Examples:
>>> get_interval('005490 KS Equity', 'day_open_30')
Session(start_time='09:00', end_time='09:30')
>>> get_interval('005490 KS Equity', 'day_normal_30_20')
Session(start_time='09:31', end_time='15:00')
>>> get_interval('005490 KS Equity', 'day_close_20')
Session(start_time='15:01', end_time='15:20')
>>> get_interval('700 HK Equity', 'am_open_30')
Session(start_time='09:30', end_time='10:00')
>>> get_interval('700 HK Equity', 'am_normal_30_30')
Session(start_time='10:01', end_time='11:30')
>>> get_interval('700 HK Equity', 'am_close_30')
Session(start_time='11:31', end_time='12:00')
>>> get_interval('ES1 Index', 'day_exact_2130_2230')
Session(start_time=None, end_time=None)
>>> get_interval('ES1 Index', 'allday_exact_2130_2230')
Session(start_time='21:30', end_time='22:30')
>>> get_interval('ES1 Index', 'allday_exact_2130_0230')
Session(start_time='21:30', end_time='02:30')
>>> get_interval('AMLP US', 'day_open_30')
Session(start_time=None, end_time=None)
>>> get_interval('7974 JP Equity', 'day_normal_180_300') is SessNA
True
>>> get_interval('Z 1 Index', 'allday_normal_30_30')
Session(start_time='01:31', end_time='20:30')
>>> get_interval('GBP Curncy', 'day')
Session(start_time='17:02', end_time='17:00')
"""
if '_' not in session:
session = f'{session}_normal_0_0'
interval = Intervals(ticker=ticker)
ss_info = session.split('_')
return getattr(interval, f'market_{ss_info.pop(1)}')(*ss_info)
|
def shift_time(start_time, mins) -> str:
"""
Shift start time by mins
Args:
start_time: start time in terms of HH:MM string
mins: number of minutes (+ / -)
Returns:
end time in terms of HH:MM string
"""
s_time = pd.Timestamp(start_time)
e_time = s_time + np.sign(mins) * pd.Timedelta(f'00:{abs(mins)}:00')
return e_time.strftime('%H:%M')
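# A few worked examples of the minute arithmetic above:
assert shift_time('09:00', 30) == '09:30'
assert shift_time('09:00', -15) == '08:45'
assert shift_time('23:50', 30) == '00:20'   # rolls past midnight; only HH:MM is kept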
|
def market_open(self, session, mins) -> Session:
"""
Time intervals for market open
Args:
session: [allday, day, am, pm, night]
mins: minutes after open
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
start_time = self.exch[session][0]
return Session(start_time, shift_time(start_time, int(mins)))
|
def market_close(self, session, mins) -> Session:
"""
Time intervals for market close
Args:
session: [allday, day, am, pm, night]
mins: minutes before close
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
end_time = self.exch[session][-1]
return Session(shift_time(end_time, -int(mins) + 1), end_time)
|
def market_normal(self, session, after_open, before_close) -> Session:
"""
Time intervals between market open and close
Args:
session: [allday, day, am, pm, night]
after_open: mins after open
before_close: mins before close
Returns:
Session of start_time and end_time
"""
logger = logs.get_logger(self.market_normal)
if session not in self.exch: return SessNA
ss = self.exch[session]
s_time = shift_time(ss[0], int(after_open) + 1)
e_time = shift_time(ss[-1], -int(before_close))
request_cross = pd.Timestamp(s_time) >= pd.Timestamp(e_time)
session_cross = pd.Timestamp(ss[0]) >= pd.Timestamp(ss[1])
if request_cross and (not session_cross):
logger.warning(f'end time {e_time} is earlier than {s_time} ...')
return SessNA
return Session(s_time, e_time)
|
def market_exact(self, session, start_time: str, end_time: str) -> Session:
"""
Explicitly specify start time and end time
Args:
session: predefined session
start_time: start time in terms of HHMM string
end_time: end time in terms of HHMM string
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
ss = self.exch[session]
same_day = ss[0] < ss[-1]
if not start_time: s_time = ss[0]
else:
s_time = param.to_hour(start_time)
if same_day: s_time = max(s_time, ss[0])
if not end_time: e_time = ss[-1]
else:
e_time = param.to_hour(end_time)
if same_day: e_time = min(e_time, ss[-1])
if same_day and (s_time > e_time): return SessNA
return Session(start_time=s_time, end_time=e_time)
|
def get_tz(tz) -> str:
"""
Convert tz from ticker / shorthands to timezone
Args:
tz: ticker or timezone shorthands
Returns:
str: Python timezone
Examples:
>>> get_tz('NY')
'America/New_York'
>>> get_tz(TimeZone.NY)
'America/New_York'
>>> get_tz('BHP AU Equity')
'Australia/Sydney'
"""
from xbbg.const import exch_info
if tz is None: return DEFAULT_TZ
to_tz = tz
if isinstance(tz, str):
if hasattr(TimeZone, tz):
to_tz = getattr(TimeZone, tz)
else:
exch = exch_info(ticker=tz)
if 'tz' in exch.index:
to_tz = exch.tz
return to_tz
|
def tz_convert(dt, to_tz, from_tz=None) -> str:
"""
Convert to tz
Args:
dt: date time
to_tz: to tz
from_tz: from tz - will be ignored if tz from dt is given
Returns:
str: date & time
Examples:
>>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')
>>> tz_convert(dt_1, to_tz='NY')
'2018-09-10 04:00:00-04:00'
>>> dt_2 = pd.Timestamp('2018-01-10 16:00')
>>> tz_convert(dt_2, to_tz='HK', from_tz='NY')
'2018-01-11 05:00:00+08:00'
>>> dt_3 = '2018-09-10 15:00'
>>> tz_convert(dt_3, to_tz='NY', from_tz='JP')
'2018-09-10 02:00:00-04:00'
"""
logger = logs.get_logger(tz_convert, level='info')
f_tz, t_tz = get_tz(from_tz), get_tz(to_tz)
from_dt = pd.Timestamp(str(dt), tz=f_tz)
logger.debug(f'converting {str(from_dt)} from {f_tz} to {t_tz} ...')
return str(pd.Timestamp(str(from_dt), tz=t_tz))
|
def missing_info(**kwargs) -> str:
"""
Full information for missing query
"""
func = kwargs.pop('func', 'unknown')
if 'ticker' in kwargs: kwargs['ticker'] = kwargs['ticker'].replace('/', '_')
info = utils.to_str(kwargs, fmt='{value}', sep='/')[1:-1]
return f'{func}/{info}'
|
def current_missing(**kwargs) -> int:
"""
Check number of trials for missing values
Returns:
int: number of trials already tried
"""
data_path = os.environ.get(BBG_ROOT, '').replace('\\', '/')
if not data_path: return 0
return len(files.all_files(f'{data_path}/Logs/{missing_info(**kwargs)}'))
|
def update_missing(**kwargs):
"""
Update number of trials for missing values
"""
data_path = os.environ.get(BBG_ROOT, '').replace('\\', '/')
if not data_path: return
if len(kwargs) == 0: return
log_path = f'{data_path}/Logs/{missing_info(**kwargs)}'
cnt = len(files.all_files(log_path)) + 1
files.create_folder(log_path)
open(f'{log_path}/{cnt}.log', 'a').close()
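# Sketch of a retry guard built from the two helpers above; assumes the BBG_ROOT
# environment variable points to a writable data folder, and the threshold of 3 is arbitrary.
query = dict(func='bdib', ticker='7974 JP Equity', dt='2018-09-10')
if current_missing(**query) < 3:
    # ... run the Bloomberg query here; if it comes back empty:
    update_missing(**query)   # record one more failed attempt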
|
def public(function):
"""
Decorator for public views that do not require authentication
Sets the STRONGHOLD_IS_PUBLIC attribute on the function to True
"""
orig_func = function
while isinstance(orig_func, partial):
orig_func = orig_func.func
set_view_func_public(orig_func)
return function
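# Minimal sketch of decorating a Django view (the view itself is hypothetical);
# django-stronghold's middleware sees the STRONGHOLD_IS_PUBLIC marker and skips the login check.
from django.http import HttpResponse

@public
def healthcheck(request):
    # reachable without authentication
    return HttpResponse('ok')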
|
def custom_req(session, request):
"""
Utility for sending a predefined request, printing the response, and
storing the received messages in a list; useful for testing
Parameters
----------
session: blpapi.session.Session
request: blpapi.request.Request
Request to be sent
Returns
-------
List of all messages received
"""
# flush event queue in case previous call errored out
while(session.tryNextEvent()):
pass
print("Sending Request:\n %s" % request)
session.sendRequest(request)
messages = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = session.nextEvent(500)
for msg in ev:
print("Message Received:\n %s" % msg)
messages.append(msg)
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return messages
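# Sketch of driving custom_req with a reference data request; assumes `session` is an
# already-started blpapi.Session with the //blp/refdata service opened, and the
# ticker/field are illustrative.
service = session.getService('//blp/refdata')
request = service.createRequest('ReferenceDataRequest')
request.append('securities', 'SPY US Equity')
request.append('fields', 'PX_LAST')
msgs = custom_req(session, request)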
|
def to_dict_list(mystr):
"""
Translate a string representation of a Bloomberg Open API Request/Response
into a list of dictionaries.
Parameters
----------
mystr: str
A string representation of one or more blpapi.request.Request or
blpapi.message.Message, these should be '\\n' separated
"""
res = _parse(mystr)
dicts = []
for res_dict in res:
dicts.append(res_dict.asDict())
return dicts
|
def bopen(**kwargs):
"""
Open and manage a BCon wrapper to a Bloomberg API session
Parameters
----------
**kwargs:
Keyword arguments passed into pdblp.BCon initialization
"""
con = BCon(**kwargs)
con.start()
try:
yield con
finally:
con.stop()
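# In pdblp this generator is exposed as a context manager (wrapped with
# contextlib.contextmanager in the library), so a typical session looks like the
# sketch below; it needs a running Bloomberg terminal, and the port/timeout values
# are illustrative.
import pdblp

with pdblp.bopen(port=8194, timeout=5000) as con:
    px = con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150630')
# the connection is stopped automatically when the block exits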
|
def start(self):
"""
Start connection and initialize session services
"""
# flush event queue in defensive way
logger = _get_logger(self.debug)
started = self._session.start()
if started:
ev = self._session.nextEvent()
ev_name = _EVENT_DICT[ev.eventType()]
logger.info('Event Type: {!r}'.format(ev_name))
for msg in ev:
logger.info('Message Received:\n{}'.format(msg))
if ev.eventType() != blpapi.Event.SESSION_STATUS:
raise RuntimeError('Expected a "SESSION_STATUS" event but '
'received a {!r}'.format(ev_name))
ev = self._session.nextEvent()
ev_name = _EVENT_DICT[ev.eventType()]
logger.info('Event Type: {!r}'.format(ev_name))
for msg in ev:
logger.info('Message Received:\n{}'.format(msg))
if ev.eventType() != blpapi.Event.SESSION_STATUS:
raise RuntimeError('Expected a "SESSION_STATUS" event but '
'received a {!r}'.format(ev_name))
else:
ev = self._session.nextEvent(self.timeout)
if ev.eventType() == blpapi.Event.SESSION_STATUS:
for msg in ev:
logger.warning('Message Received:\n{}'.format(msg))
raise ConnectionError('Could not start blpapi.Session')
self._init_services()
return self
|
def _init_services(self):
"""
Initialize blpapi.Session services
"""
logger = _get_logger(self.debug)
# flush event queue in defensive way
opened = self._session.openService('//blp/refdata')
ev = self._session.nextEvent()
ev_name = _EVENT_DICT[ev.eventType()]
logger.info('Event Type: {!r}'.format(ev_name))
for msg in ev:
logger.info('Message Received:\n{}'.format(msg))
if ev.eventType() != blpapi.Event.SERVICE_STATUS:
raise RuntimeError('Expected a "SERVICE_STATUS" event but '
'received a {!r}'.format(ev_name))
if not opened:
logger.warning('Failed to open //blp/refdata')
raise ConnectionError('Could not open a //blp/refdata service')
self.refDataService = self._session.getService('//blp/refdata')
opened = self._session.openService('//blp/exrsvc')
ev = self._session.nextEvent()
ev_name = _EVENT_DICT[ev.eventType()]
logger.info('Event Type: {!r}'.format(ev_name))
for msg in ev:
logger.info('Message Received:\n{}'.format(msg))
if ev.eventType() != blpapi.Event.SERVICE_STATUS:
raise RuntimeError('Expected a "SERVICE_STATUS" event but '
'received a {!r}'.format(ev_name))
if not opened:
logger.warning('Failed to open //blp/exrsvc')
raise ConnectionError('Could not open a //blp/exrsvc service')
self.exrService = self._session.getService('//blp/exrsvc')
return self
|
def bdh(self, tickers, flds, start_date, end_date, elms=None,
ovrds=None, longdata=False):
"""
Get tickers and fields, return pandas DataFrame with columns as
MultiIndex with levels "ticker" and "field" and indexed by "date".
If long data is requested return DataFrame with columns
["date", "ticker", "field", "value"].
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")].
Refer to the HistoricalDataRequest section in the
'Services & schemas reference guide' for more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted
"""
ovrds = [] if not ovrds else ovrds
elms = [] if not elms else elms
elms = list(elms)
data = self._bdh_list(tickers, flds, start_date, end_date,
elms, ovrds)
df = pd.DataFrame(data, columns=['date', 'ticker', 'field', 'value'])
df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'])
if not longdata:
cols = ['ticker', 'field']
df = df.set_index(['date'] + cols).unstack(cols)
df.columns = df.columns.droplevel(0)
return df
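# Usage sketch: `con` is an already-started BCon and a Bloomberg terminal is available;
# tickers and dates are illustrative.
wide = con.bdh(['SPY US Equity', 'IWM US Equity'], 'PX_LAST',
               '20150629', '20150630')                    # (ticker, field) MultiIndex columns
tall = con.bdh('SPY US Equity', ['PX_LAST', 'VOLUME'],
               '20150629', '20150630', longdata=True)     # one row per (date, ticker, field)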
|
def ref(self, tickers, flds, ovrds=None):
"""
Make a reference data request, get tickers and fields, return long
pandas DataFrame with columns [ticker, field, value]
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
Example
-------
>>> import pdblp
>>> con = pdblp.BCon()
>>> con.start()
>>> con.ref("CL1 Comdty", ["FUT_GEN_MONTH"])
Notes
-----
This returns reference data which has singleton values. In raw format
the messages passed back contain data of the form
fieldData = {
FUT_GEN_MONTH = "FGHJKMNQUVXZ"
}
"""
ovrds = [] if not ovrds else ovrds
logger = _get_logger(self.debug)
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
request = self._create_req('ReferenceDataRequest', tickers, flds,
ovrds, [])
logger.info('Sending Request:\n{}'.format(request))
self._session.sendRequest(request, identity=self._identity)
data = self._parse_ref(flds)
data = pd.DataFrame(data)
data.columns = ['ticker', 'field', 'value']
return data
|
def bulkref_hist(self, tickers, flds, dates, ovrds=None,
date_field='REFERENCE_DATE'):
"""
Make iterative calls to bulkref() and create a long DataFrame with
columns [date, ticker, field, name, value, position] where each date
corresponds to overriding a historical data override field.
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
dates: list
list of date strings in the format YYYYmmdd
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value. This should not include the date_field which will
be iteratively overridden
date_field: str
Field to iteratively override for requesting historical data,
e.g. REFERENCE_DATE, CURVE_DATE, etc.
Example
-------
>>> import pdblp
>>> con = pdblp.BCon()
>>> con.start()
>>> dates = ["20160625", "20160626"]
>>> con.bulkref_hist("BVIS0587 Index", "CURVE_TENOR_RATES", dates,
... date_field="CURVE_DATE")
"""
ovrds = [] if not ovrds else ovrds
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
self._send_hist(tickers, flds, dates, date_field, ovrds)
data = self._parse_bulkref(flds, keep_corrId=True,
sent_events=len(dates))
data = pd.DataFrame(data)
data.columns = ['ticker', 'field', 'name', 'value', 'position', 'date']
data = data.sort_values(by=['date', 'position']).reset_index(drop=True)
data = data.loc[:, ['date', 'ticker', 'field', 'name',
'value', 'position']]
return data
|
def bdib(self, ticker, start_datetime, end_datetime, event_type, interval,
elms=None):
"""
Get Open, High, Low, Close, Volume, and numEvents for a ticker.
Return pandas DataFrame
Parameters
----------
ticker: string
String corresponding to ticker
start_datetime: string
UTC datetime in format YYYY-mm-ddTHH:MM:SS
end_datetime: string
UTC datetime in format YYYY-mm-ddTHH:MM:SS
event_type: string {TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID,
BEST_ASK}
Requested data event type
interval: int {1... 1440}
Length of time bars
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set. Refer to the IntradayBarRequest section in the
'Services & schemas reference guide' for more info on these values
"""
elms = [] if not elms else elms
# flush event queue in case previous call errored out
logger = _get_logger(self.debug)
while(self._session.tryNextEvent()):
pass
# Create and fill the request for the historical data
request = self.refDataService.createRequest('IntradayBarRequest')
request.set('security', ticker)
request.set('eventType', event_type)
request.set('interval', interval) # bar interval in minutes
request.set('startDateTime', start_datetime)
request.set('endDateTime', end_datetime)
for name, val in elms:
request.set(name, val)
logger.info('Sending Request:\n{}'.format(request))
# Send the request
self._session.sendRequest(request, identity=self._identity)
# Process received events
data = []
flds = ['open', 'high', 'low', 'close', 'volume', 'numEvents']
for msg in self._receive_events():
d = msg['element']['IntradayBarResponse']
for bar in d['barData']['barTickData']:
data.append(bar['barTickData'])
data = pd.DataFrame(data).set_index('time').sort_index().loc[:, flds]
return data
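# Usage sketch, again assuming a started connection; the UTC window and ticker are illustrative.
bars = con.bdib('SPY US Equity',
                '2015-06-19T13:30:00', '2015-06-19T20:00:00',
                event_type='TRADE', interval=5)
bars[['open', 'high', 'low', 'close', 'volume']].head()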
|
def bsrch(self, domain):
"""
This function uses the Bloomberg API to retrieve 'bsrch' (Bloomberg
SRCH Data) queries. Returns a DataFrame of tickers.
Parameters
----------
domain: string
A character string with the name of the domain to execute.
It can be a user defined SRCH screen, commodity screen or
one of the variety of Bloomberg examples. All domains are in the
format <domain>:<search_name>. Example "COMDTY:NGFLOW"
Returns
-------
data: pandas.DataFrame
List of bloomberg tickers from the BSRCH
"""
logger = _get_logger(self.debug)
request = self.exrService.createRequest('ExcelGetGridRequest')
request.set('Domain', domain)
logger.info('Sending Request:\n{}'.format(request))
self._session.sendRequest(request, identity=self._identity)
data = []
for msg in self._receive_events(to_dict=False):
for v in msg.getElement("DataRecords").values():
for f in v.getElement("DataFields").values():
data.append(f.getElementAsString("StringValue"))
return pd.DataFrame(data)
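# For example, using the commodity screen named in the docstring (requires SRCH access):
flows = con.bsrch('COMDTY:NGFLOW')   # single column of Bloomberg tickers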
|
def assemble_one(asmcode, pc=0, fork=DEFAULT_FORK):
""" Assemble one EVM instruction from its textual representation.
:param asmcode: assembly code for one instruction
:type asmcode: str
:param pc: program counter of the instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: An Instruction object
:rtype: Instruction
Example use::
>>> print(assemble_one('LT'))
"""
try:
instruction_table = instruction_tables[fork]
asmcode = asmcode.strip().split(' ')
instr = instruction_table[asmcode[0].upper()]
if pc:
instr.pc = pc
if instr.operand_size > 0:
assert len(asmcode) == 2
instr.operand = int(asmcode[1], 0)
return instr
except:
raise AssembleError("Something wrong at pc %d" % pc)
|
def assemble_all(asmcode, pc=0, fork=DEFAULT_FORK):
""" Assemble a sequence of textual representation of EVM instructions
:param asmcode: assembly code for any number of instructions
:type asmcode: str
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: A generator of Instruction objects
:rtype: generator[Instruction]
Example use::
>>> assemble_all('''PUSH1 0x60\n \
PUSH1 0x40\n \
MSTORE\n \
PUSH1 0x2\n \
PUSH2 0x108\n \
PUSH1 0x0\n \
POP\n \
SSTORE\n \
PUSH1 0x40\n \
MLOAD\n \
''')
"""
asmcode = asmcode.split('\n')
asmcode = iter(asmcode)
for line in asmcode:
if not line.strip():
continue
instr = assemble_one(line, pc=pc, fork=fork)
yield instr
pc += instr.size
|
def disassemble_one(bytecode, pc=0, fork=DEFAULT_FORK):
""" Disassemble a single instruction from a bytecode
:param bytecode: the bytecode stream
:type bytecode: str | bytes | bytearray | iterator
:param pc: program counter of the instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: an Instruction object
:rtype: Instruction
Example use::
>>> print(disassemble_one('\x60\x10'))
"""
instruction_table = instruction_tables[fork]
if isinstance(bytecode, bytes):
bytecode = bytearray(bytecode)
if isinstance(bytecode, str):
bytecode = bytearray(bytecode.encode('latin-1'))
bytecode = iter(bytecode)
try:
opcode = next(bytecode)
except StopIteration:
return
assert isinstance(opcode, int)
instruction = copy.copy(instruction_table.get(opcode, None))
if instruction is None:
instruction = Instruction(opcode, 'INVALID', 0, 0, 0, 0, 'Unspecified invalid instruction.')
instruction.pc = pc
try:
if instruction.has_operand:
instruction.parse_operand(bytecode)
except ParseError:
instruction = None
finally:
return instruction
|
def disassemble_all(bytecode, pc=0, fork=DEFAULT_FORK):
""" Disassemble all instructions in bytecode
:param bytecode: an evm bytecode (binary)
:type bytecode: str | bytes | bytearray | iterator
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: A generator of Instruction objects
:rtype: generator[Instruction]
Example use::
>>> for instr in disassemble_all(bytecode):
... print(instr)
...
PUSH1 0x60
PUSH1 0x40
MSTORE
PUSH1 0x2
PUSH2 0x108
PUSH1 0x0
POP
SSTORE
PUSH1 0x40
MLOAD
"""
if isinstance(bytecode, bytes):
bytecode = bytearray(bytecode)
if isinstance(bytecode, str):
bytecode = bytearray(bytecode.encode('latin-1'))
bytecode = iter(bytecode)
while True:
instr = disassemble_one(bytecode, pc=pc, fork=fork)
if not instr:
return
pc += instr.size
yield instr
|
def disassemble(bytecode, pc=0, fork=DEFAULT_FORK):
""" Disassemble an EVM bytecode
:param bytecode: binary representation of an evm bytecode
:type bytecode: str | bytes | bytearray
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: the text representation of the assembler code
Example use::
>>> disassemble("\x60\x60\x60\x40\x52\x60\x02\x61\x01\x00")
...
PUSH1 0x60
PUSH1 0x40
MSTORE
PUSH1 0x2
PUSH2 0x100
"""
return '\n'.join(map(str, disassemble_all(bytecode, pc=pc, fork=fork)))
|
def assemble(asmcode, pc=0, fork=DEFAULT_FORK):
""" Assemble an EVM program
:param asmcode: an evm assembler program
:type asmcode: str
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: the hex representation of the bytecode
:rtype: str
Example use::
>>> assemble('''PUSH1 0x60\n \
PUSH1 0x40\n \
MSTORE\n \
PUSH1 0x2\n \
PUSH2 0x100\n \
''')
...
b"\x60\x60\x60\x40\x52\x60\x02\x61\x01\x00"
"""
return b''.join(x.bytes for x in assemble_all(asmcode, pc=pc, fork=fork))
|
def disassemble_hex(bytecode, pc=0, fork=DEFAULT_FORK):
""" Disassemble an EVM bytecode
:param bytecode: canonical representation of an evm bytecode (hexadecimal)
:type bytecode: str
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: the text representation of the assembler code
:rtype: str
Example use::
>>> disassemble_hex("0x60606040526002610100")
...
PUSH1 0x60
PUSH1 0x40
MSTORE
PUSH1 0x2
PUSH2 0x100
"""
if bytecode.startswith('0x'):
bytecode = bytecode[2:]
bytecode = unhexlify(bytecode)
return disassemble(bytecode, pc=pc, fork=fork)
|
def assemble_hex(asmcode, pc=0, fork=DEFAULT_FORK):
""" Assemble an EVM program
:param asmcode: an evm assembler program
:type asmcode: str | iterator[Instruction]
:param pc: program counter of the first instruction(optional)
:type pc: int
:param fork: fork name (optional)
:type fork: str
:return: the hex representation of the bytecode
:rtype: str
Example use::
>>> assemble_hex('''PUSH1 0x60\n \
PUSH1 0x40\n \
MSTORE\n \
PUSH1 0x2\n \
PUSH2 0x100\n \
''')
...
"0x6060604052600261010"
"""
if isinstance(asmcode, list):
return '0x' + hexlify(b''.join([x.bytes for x in asmcode])).decode('ascii')
return '0x' + hexlify(assemble(asmcode, pc=pc, fork=fork)).decode('ascii')
|
def block_to_fork(block_number):
""" Convert block number to fork name.
:param block_number: block number
:type block_number: int
:return: fork name
:rtype: str
Example use::
>>> block_to_fork(0)
...
"frontier"
>>> block_to_fork(4370000)
...
"byzantium"
>>> block_to_fork(4370001)
...
"byzantium"
"""
forks_by_block = {
0: "frontier",
1150000: "homestead",
# 1920000 Dao
2463000: "tangerine_whistle",
2675000: "spurious_dragon",
4370000: "byzantium",
#7280000: "constantinople", # Same Block as petersburg, commented to avoid conflicts
7280000: "petersburg",
9999999: "serenity" # to be replaced after Serenity launch
}
fork_names = list(forks_by_block.values())
fork_blocks = list(forks_by_block.keys())
return fork_names[bisect(fork_blocks, block_number) - 1]
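# The bisect call picks the right-most activation block not exceeding block_number;
# a minimal check of that logic (activation heights copied from the table above):
from bisect import bisect

activation = [0, 1150000, 2463000, 2675000, 4370000, 7280000, 9999999]
assert bisect(activation, 1919999) - 1 == 1          # index of 'homestead'
assert block_to_fork(1919999) == 'homestead'
assert block_to_fork(2675000) == 'spurious_dragon'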
|