idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
62,100
def packages ( self , login = None , platform = None , package_type = None , type_ = None , access = None ) : logger . debug ( '' ) method = self . _anaconda_client_api . user_packages return self . _create_worker ( method , login = login , platform = platform , package_type = package_type , type_ = type_ , access = access )
Return all the available packages for a given user .
62,101
def country(from_key='name', to_key='iso'):
    """Create and return a mapper function over geonamescache country data.

    The returned function maps a *from_key* value (e.g. a country name) to
    the corresponding *to_key* value (e.g. its ISO code), returning None
    when no matching country is found.
    """
    gc = GeonamesCache()
    dataset = gc.get_dataset_by_key(gc.get_countries(), from_key)

    def mapper(value):
        # Normalize alternate country-name spellings before the lookup.
        if from_key == 'name':
            value = mappings.country_names.get(value, value)
        item = dataset.get(value)
        if item:
            return item[to_key]

    return mapper
Creates and returns a mapper function to access country data .
62,102
def get_cities ( self ) : if self . cities is None : self . cities = self . _load_data ( self . cities , 'cities.json' ) return self . cities
Get a dictionary of cities keyed by geonameid .
62,103
def get_cities_by_name(self, name):
    """Return a list of single-entry dicts ({geonameid: city}) for cities
    with the given name.

    Results are memoized in ``self.cities_by_names``; the flat list of
    (geonameid, city) pairs is built lazily on first use.
    """
    if name not in self.cities_by_names:
        if self.cities_items is None:
            self.cities_items = list(self.get_cities().items())
        # Build {gid: city} directly; the original dict({gid: city})
        # constructed an identical dict and then copied it for no benefit.
        self.cities_by_names[name] = [{gid: city}
                                      for gid, city in self.cities_items
                                      if city['name'] == name]
    return self.cities_by_names[name]
Get a list of city dictionaries with the given name .
62,104
def _set_repo_urls_from_channels ( self , channels ) : repos = [ ] sys_platform = self . _conda_api . get_platform ( ) for channel in channels : url = '{0}/{1}/repodata.json.bz2' . format ( channel , sys_platform ) repos . append ( url ) return repos
Convert channels into normalized repodata URLs for the current platform .
62,105
def _check_repos ( self , repos ) : self . _checking_repos = [ ] self . _valid_repos = [ ] for repo in repos : worker = self . download_is_valid_url ( repo ) worker . sig_finished . connect ( self . _repos_checked ) worker . repo = repo self . _checking_repos . append ( repo )
Check if repodata urls are valid .
62,106
def _repos_checked ( self , worker , output , error ) : if worker . repo in self . _checking_repos : self . _checking_repos . remove ( worker . repo ) if output : self . _valid_repos . append ( worker . repo ) if len ( self . _checking_repos ) == 0 : self . _download_repodata ( self . _valid_repos )
Callback for _check_repos .
62,107
def _repo_url_to_path ( self , repo ) : repo = repo . replace ( 'http://' , '' ) repo = repo . replace ( 'https://' , '' ) repo = repo . replace ( '/' , '_' ) return os . sep . join ( [ self . _data_directory , repo ] )
Convert a repo url to a file path for local storage .
62,108
def _download_repodata ( self , checked_repos ) : self . _files_downloaded = [ ] self . _repodata_files = [ ] self . __counter = - 1 if checked_repos : for repo in checked_repos : path = self . _repo_url_to_path ( repo ) self . _files_downloaded . append ( path ) self . _repodata_files . append ( path ) worker = self . download_async ( repo , path ) worker . url = repo worker . path = path worker . sig_finished . connect ( self . _repodata_downloaded ) else : path = self . _get_repodata_from_meta ( ) self . _repodata_files = [ path ] self . _repodata_downloaded ( )
Download repodata .
62,109
def _get_repodata_from_meta(self):
    """Generate repodata from the local conda-meta JSON files.

    Offline fallback: builds a repodata-like structure from the per-package
    metadata under ``<ROOT_PREFIX>/conda-meta`` and writes it to
    ``offline.json`` inside the data directory.

    Returns the path of the written file.
    """
    path = os.sep.join([self.ROOT_PREFIX, 'conda-meta'])
    packages = os.listdir(path)
    meta_repodata = {}
    for pkg in packages:
        if pkg.endswith('.json'):
            filepath = os.sep.join([path, pkg])
            with open(filepath, 'r') as f:
                data = json.load(f)
            # Drop bulky keys that repodata consumers do not need.
            if 'files' in data:
                data.pop('files')
            if 'icondata' in data:
                data.pop('icondata')
            name = pkg.replace('.json', '')
            meta_repodata[name] = data
    meta_repodata_path = os.sep.join([self._data_directory, 'offline.json'])
    repodata = {'info': [], 'packages': meta_repodata}
    with open(meta_repodata_path, 'w') as f:
        json.dump(repodata, f, sort_keys=True, indent=4,
                  separators=(',', ': '))
    return meta_repodata_path
Generate repodata from local meta files .
62,110
def _repodata_downloaded ( self , worker = None , output = None , error = None ) : if worker : self . _files_downloaded . remove ( worker . path ) if worker . path in self . _files_downloaded : self . _files_downloaded . remove ( worker . path ) if len ( self . _files_downloaded ) == 0 : self . sig_repodata_updated . emit ( list ( set ( self . _repodata_files ) ) )
Callback for _download_repodata .
62,111
def repodata_files(self, channels=None):
    """Return local repodata file paths for the given channels.

    Falls back to the channels configured in .condarc when none are given.
    """
    if channels is None:
        channels = self.conda_get_condarc_channels()
    repodata_urls = self._set_repo_urls_from_channels(channels)
    # _repo_url_to_path already returns the full path; the original's
    # os.sep.join over a single-element list was a no-op.
    return [self._repo_url_to_path(repourl) for repourl in repodata_urls]
Return the repodata paths based on channels and the data_directory .
62,112
def update_repodata ( self , channels = None ) : norm_channels = self . conda_get_condarc_channels ( channels = channels , normalize = True ) repodata_urls = self . _set_repo_urls_from_channels ( norm_channels ) self . _check_repos ( repodata_urls )
Update repodata from channels or use condarc channels if None .
62,113
def update_metadata ( self ) : if self . _data_directory is None : raise Exception ( 'Need to call `api.set_data_directory` first.' ) metadata_url = 'https://repo.continuum.io/pkgs/metadata.json' filepath = os . sep . join ( [ self . _data_directory , 'metadata.json' ] ) worker = self . download_requests ( metadata_url , filepath ) return worker
Update the metadata available for packages in repo . continuum . io .
62,114
def check_valid_channel(self, channel,
                        conda_url='https://conda.anaconda.org'):
    """Start an async check that *channel* serves a repodata.json.

    Returns the download worker; ``worker.url`` carries the normalized
    channel URL.
    """
    if channel.startswith(('https://', 'http://')):
        url = channel
    else:
        url = '{0}/{1}'.format(conda_url, channel)
    if url.endswith('/'):
        # Drop a single trailing slash before composing the repodata URL.
        url = url[:-1]
    repodata_url = '{0}/{1}/{2}'.format(url, self.conda_platform(),
                                        'repodata.json')
    worker = self.download_is_valid_url(repodata_url)
    worker.url = url
    return worker
Check if channel is valid .
62,115
def _aws_get_instance_by_tag(region, name, tag, raw):
    """Return all EC2 instances in *region* whose *tag* matches *name*."""
    client = boto3.session.Session().client('ec2', region)
    matching_reservations = client.describe_instances(
        Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])
    # Nested comprehension instead of the original list comprehension that
    # was executed purely for its append() side effects.
    return [_aws_instance_from_dict(region, instance, raw)
            for reservation in matching_reservations if reservation
            for instance in reservation.get('Instances')]
Get all instances matching a tag .
62,116
def aws_get_instances_by_id(region, instance_id, raw=True):
    """Return EC2 instances in *region* matching *instance_id*.

    Returns an empty list when the id does not exist; any other client
    error is re-raised.
    """
    client = boto3.session.Session().client('ec2', region)
    try:
        matching_reservations = client.describe_instances(
            InstanceIds=[instance_id]).get('Reservations', [])
    except ClientError as exc:
        if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':
            raise
        return []
    # Nested comprehension instead of the original list comprehension that
    # was executed purely for its append() side effects.
    return [_aws_instance_from_dict(region, instance, raw)
            for reservation in matching_reservations if reservation
            for instance in reservation.get('Instances')]
Return instances matching an id .
62,117
def get_instances_by_name ( name , sort_by_order = ( 'cloud' , 'name' ) , projects = None , raw = True , regions = None , gcp_credentials = None , clouds = SUPPORTED_CLOUDS ) : matching_instances = all_clouds_get_instances_by_name ( name , projects , raw , credentials = gcp_credentials , clouds = clouds ) if regions : matching_instances = [ instance for instance in matching_instances if instance . region in regions ] matching_instances . sort ( key = lambda instance : [ getattr ( instance , field ) for field in sort_by_order ] ) return matching_instances
Get instances from GCP and AWS by name .
62,118
def get_os_version(instance):
    """Get the Ubuntu version ('16.04' or '14.04') for an instance.

    AWS: inferred from the AMI name.  GCP: inferred from the boot disk's
    license strings.  Falls back to '14.04' when nothing matches.
    """
    if instance.cloud == 'aws':
        client = boto3.client('ec2', instance.region)
        image_id = client.describe_instances(
            InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']
        return '16.04' if '16.04' in client.describe_images(
            ImageIds=[image_id])['Images'][0]['Name'] else '14.04'
    if instance.cloud == 'gcp':
        credentials = GoogleCredentials.get_application_default()
        compute = discovery.build('compute', 'v1', credentials=credentials)
        for disk in compute.instances().get(instance=instance.name,
                                            zone=instance.zone,
                                            project=instance.project).execute()['disks']:
            if not disk.get('boot'):
                continue
            # License URIs embed the OS release, e.g. '...ubuntu-1604-...'.
            for value in disk.get('licenses', []):
                if '1604' in value:
                    return '16.04'
                if '1404' in value:
                    return '14.04'
        return '14.04'
    # Unknown cloud: conservative default.
    return '14.04'
Get OS Version for instances .
62,119
def get_volumes(instance):
    """Return all volumes/disks attached to *instance* (AWS or GCP).

    AWS: maps device name -> {'size', 'volume_type'}.
    GCP: maps disk index -> dict with size/type/deviceName/interface/diskType.

    Raises ValueError for an unknown cloud.
    """
    if instance.cloud == 'aws':
        client = boto3.client('ec2', instance.region)
        devices = client.describe_instance_attribute(
            InstanceId=instance.id,
            Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])
        volumes = client.describe_volumes(VolumeIds=[
            device['Ebs']['VolumeId'] for device in devices
            if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])
        return {volume['Attachments'][0]['Device']:
                {'size': volume['Size'], 'volume_type': volume['VolumeType']}
                for volume in volumes}
    if instance.cloud == 'gcp':
        credentials = GoogleCredentials.get_application_default()
        compute = discovery.build('compute', 'v1', credentials=credentials)
        volumes = {}
        for disk in compute.instances().get(
                instance=instance.id, zone=instance.zone,
                project=instance.project).execute()['disks']:
            index = disk['index']
            # The boot disk is looked up under the instance id rather than
            # its generic device name.
            name = (disk['deviceName']
                    if disk['deviceName'] not in [u'persistent-disk-0', 'boot']
                    else instance.id)
            if 'local-ssd' in disk['deviceName']:
                # Local SSDs have a fixed size and no disks().get() record.
                # (The original duplicated this check as a dead first `if`.)
                size = 375.0
                disk_type = 'local-ssd'
            else:
                disk_data = compute.disks().get(disk=name, zone=instance.zone,
                                                project=instance.project).execute()
                size = float(disk_data['sizeGb'])
                disk_type = 'pd-ssd'
            volumes[index] = {'size': size, 'type': disk['type'],
                              'deviceName': disk['deviceName'],
                              'interface': disk['interface'],
                              'diskType': disk_type}
        return volumes
    raise ValueError('Unknown cloud %s' % instance.cloud)
Returns all the volumes of an instance .
62,120
def get_persistent_address ( instance ) : if instance . cloud == 'aws' : client = boto3 . client ( 'ec2' , instance . region ) try : client . describe_addresses ( PublicIps = [ instance . ip_address ] ) return instance . ip_address except botocore . client . ClientError as exc : if exc . response . get ( 'Error' , { } ) . get ( 'Code' ) != 'InvalidAddress.NotFound' : raise return None if instance . cloud == 'gcp' : credentials = GoogleCredentials . get_application_default ( ) compute = discovery . build ( 'compute' , 'v1' , credentials = credentials ) try : return compute . addresses ( ) . get ( address = instance . name , project = instance . project , region = instance . region ) . execute ( ) [ 'address' ] except errors . HttpError as exc : if 'was not found' in str ( exc ) : return None raise raise ValueError ( 'Unknown cloud %s' % instance . cloud )
Returns the public ip address of an instance .
62,121
def main ( ) : pip_packages = { } for package in pip . get_installed_distributions ( ) : name = package . project_name version = package . version full_name = "{0}-{1}-pip" . format ( name . lower ( ) , version ) pip_packages [ full_name ] = { 'version' : version } data = json . dumps ( pip_packages ) print ( data )
Use pip to find pip installed packages in a given prefix .
62,122
def _save ( file , data , mode = 'w+' ) : with open ( file , mode ) as fh : fh . write ( data )
Write all data to created file . Also overwrite previous file .
62,123
def merge ( obj ) : merge = '' for f in obj . get ( 'static' , [ ] ) : print 'Merging: {}' . format ( f ) merge += _read ( f ) def doless ( f ) : print 'Compiling LESS: {}' . format ( f ) ret , tmp = commands . getstatusoutput ( 'lesscpy ' + f ) if ret == 0 : return tmp else : print 'LESS to CSS failed for: {} (Do you have lesscpy installed?)' . format ( f ) return '' if merger . get ( 'config' ) : import re for p in merger [ 'path' ] : sys . path . append ( p ) os . environ . setdefault ( "DJANGO_SETTINGS_MODULE" , merger [ 'config' ] ) try : from django . template . loader import get_template_from_string from django . template . base import Context from django . utils . encoding import smart_str from django . conf import settings except : print 'Do you really have django well installed?' sys . exit ( 1 ) for f in obj . get ( 'template' , [ ] ) : print 'Merging django template: {}' . format ( f ) t = _read ( f ) if settings . FORCE_SCRIPT_NAME : t = re . sub ( r'\{%\s+url\b' , settings . FORCE_SCRIPT_NAME + '{% url ' , t ) tmp = smart_str ( get_template_from_string ( t ) . render ( Context ( { } ) ) ) if f . endswith ( '.less' ) : pass merge += tmp for f in obj . get ( 'less' , [ ] ) : merge += doless ( f ) return merge
Merge contents .
62,124
def jsMin ( data , file ) : print 'Minifying JS... ' , url = 'http://javascript-minifier.com/raw' req = urllib2 . Request ( url , urllib . urlencode ( { 'input' : data } ) ) try : f = urllib2 . urlopen ( req ) response = f . read ( ) f . close ( ) print 'Final: {:.1f}%' . format ( 100.0 * len ( response ) / len ( data ) ) print 'Saving: {} ({:.2f}kB)' . format ( file , len ( response ) / 1024.0 ) _save ( file , response ) except : print 'Oops!! Failed :(' return 1 return 0
Minify JS data and saves to file .
62,125
def jpgMin ( file , force = False ) : if not os . path . isfile ( file + '.original' ) or force : data = _read ( file , 'rb' ) _save ( file + '.original' , data , 'w+b' ) print 'Optmising JPG {} - {:.2f}kB' . format ( file , len ( data ) / 1024.0 ) , url = 'http://jpgoptimiser.com/optimise' parts , headers = encode_multipart ( { } , { 'input' : { 'filename' : 'wherever.jpg' , 'content' : data } } ) req = urllib2 . Request ( url , data = parts , headers = headers ) try : f = urllib2 . urlopen ( req ) response = f . read ( ) f . close ( ) print ' - {:.2f} - {:.1f}%' . format ( len ( response ) / 1024.0 , 100.0 * len ( response ) / len ( data ) ) _save ( file , response , 'w+b' ) except : print 'Oops!! Failed :(' return 1 else : print 'Ignoring file: {}' . format ( file ) return 0
Try to optimise a JPG file .
62,126
def process ( obj ) : merged = merge ( obj ) if obj . get ( 'full' ) : print 'Saving: {} ({:.2f}kB)' . format ( obj [ 'full' ] , len ( merged ) / 1024.0 ) _save ( obj [ 'full' ] , merged ) else : print 'Full merged size: {:.2f}kB' . format ( len ( merged ) / 1024.0 ) if obj . get ( 'jsmin' ) : jsMin ( merged , obj [ 'jsmin' ] ) if obj . get ( 'cssmin' ) : cssMin ( merged , obj [ 'cssmin' ] )
Process each block of the merger object .
62,127
def optimize(exp_rets, covs):
    """Return parameters for mean-variance portfolio optimization.

    Returns ``(a, b, least_risk_ret)`` where the optimal weight vector for
    a target return ``r`` is ``a * r + b`` (two-fund separation), and
    ``least_risk_ret`` is the return of the minimum-variance portfolio.
    """
    cov_inv = np.linalg.inv(covs)
    ones = np.ones(len(exp_rets))
    ones_ci = ones.dot(cov_inv)
    rets_ci = exp_rets.dot(cov_inv)
    # 2x2 Gram matrix of the expected-return and ones vectors under cov^-1.
    gram = np.array([[rets_ci.dot(exp_rets), ones_ci.dot(exp_rets)],
                     [rets_ci.dot(ones), ones_ci.dot(ones)]])
    gram_inv = np.linalg.inv(gram)
    a = gram_inv[0, 0] * rets_ci + gram_inv[1, 0] * ones_ci
    b = gram_inv[0, 1] * rets_ci + gram_inv[1, 1] * ones_ci
    least_risk_ret = gram[0, 1] / gram[1, 1]
    return a, b, least_risk_ret
Return parameters for portfolio optimization .
62,128
def growthfromrange ( rangegrowth , startdate , enddate ) : _yrs = ( pd . Timestamp ( enddate ) - pd . Timestamp ( startdate ) ) . total_seconds ( ) / dt . timedelta ( 365.25 ) . total_seconds ( ) return yrlygrowth ( rangegrowth , _yrs )
Annual growth given growth from start date to end date .
62,129
def equities ( country = 'US' ) : nasdaqblob , otherblob = _getrawdata ( ) eq_triples = [ ] eq_triples . extend ( _get_nas_triples ( nasdaqblob ) ) eq_triples . extend ( _get_other_triples ( otherblob ) ) eq_triples . sort ( ) index = [ triple [ 0 ] for triple in eq_triples ] data = [ triple [ 1 : ] for triple in eq_triples ] return pd . DataFrame ( data , index , columns = [ 'Security Name' , 'Exchange' ] , dtype = str )
Return a DataFrame of current US equities .
62,130
def straddle ( self , strike , expiry ) : _rows = { } _prices = { } for _opttype in _constants . OPTTYPES : _rows [ _opttype ] = _relevant_rows ( self . data , ( strike , expiry , _opttype , ) , "No key for {} strike {} {}" . format ( expiry , strike , _opttype ) ) _prices [ _opttype ] = _getprice ( _rows [ _opttype ] ) _eq = _rows [ _constants . OPTTYPES [ 0 ] ] . loc [ : , 'Underlying_Price' ] . values [ 0 ] _qt = _rows [ _constants . OPTTYPES [ 0 ] ] . loc [ : , 'Quote_Time' ] . values [ 0 ] _index = [ 'Call' , 'Put' , 'Credit' , 'Underlying_Price' , 'Quote_Time' ] _vals = np . array ( [ _prices [ 'call' ] , _prices [ 'put' ] , _prices [ 'call' ] + _prices [ 'put' ] , _eq , _qt ] ) return pd . DataFrame ( _vals , index = _index , columns = [ 'Value' ] )
Metrics for evaluating a straddle .
62,131
def get ( equity ) : _optmeta = pdr . data . Options ( equity , 'yahoo' ) _optdata = _optmeta . get_all_data ( ) return Options ( _optdata )
Retrieve all current options chains for given equity .
62,132
def transform ( data_frame , ** kwargs ) : norm = kwargs . get ( 'norm' , 1.0 ) axis = kwargs . get ( 'axis' , 0 ) if axis == 0 : norm_vector = _get_norms_of_rows ( data_frame , kwargs . get ( 'method' , 'vector' ) ) else : norm_vector = _get_norms_of_cols ( data_frame , kwargs . get ( 'method' , 'first' ) ) if 'labels' in kwargs : if axis == 0 : return data_frame . apply ( lambda col : col * norm / norm_vector , axis = 0 ) , kwargs [ 'labels' ] . apply ( lambda col : col * norm / norm_vector , axis = 0 ) else : raise ValueError ( "label normalization incompatible with normalization by column" ) else : if axis == 0 : return data_frame . apply ( lambda col : col * norm / norm_vector , axis = 0 ) else : return data_frame . apply ( lambda row : row * norm / norm_vector , axis = 1 )
Return a transformed DataFrame .
62,133
def _get_norms_of_rows ( data_frame , method ) : if method == 'vector' : norm_vector = np . linalg . norm ( data_frame . values , axis = 1 ) elif method == 'last' : norm_vector = data_frame . iloc [ : , - 1 ] . values elif method == 'mean' : norm_vector = np . mean ( data_frame . values , axis = 1 ) elif method == 'first' : norm_vector = data_frame . iloc [ : , 0 ] . values else : raise ValueError ( "no normalization method '{0}'" . format ( method ) ) return norm_vector
Return a column vector containing the norm of each row .
62,134
def get ( self , opttype , strike , expiry ) : _optrow = _relevant_rows ( self . data , ( strike , expiry , opttype , ) , "No key for {} strike {} {}" . format ( expiry , strike , opttype ) ) return _getprice ( _optrow )
Price as midpoint between bid and ask .
62,135
def metrics ( self , opttype , strike , expiry ) : _optrow = _relevant_rows ( self . data , ( strike , expiry , opttype , ) , "No key for {} strike {} {}" . format ( expiry , strike , opttype ) ) _index = [ 'Opt_Price' , 'Time_Val' , 'Last' , 'Bid' , 'Ask' , 'Vol' , 'Open_Int' , 'Underlying_Price' , 'Quote_Time' ] _out = pd . DataFrame ( index = _index , columns = [ 'Value' ] ) _out . loc [ 'Opt_Price' , 'Value' ] = _opt_price = _getprice ( _optrow ) for _name in _index [ 2 : ] : _out . loc [ _name , 'Value' ] = _optrow . loc [ : , _name ] . values [ 0 ] _eq_price = _out . loc [ 'Underlying_Price' , 'Value' ] if opttype == 'put' : _out . loc [ 'Time_Val' ] = _get_put_time_val ( _opt_price , strike , _eq_price ) else : _out . loc [ 'Time_Val' ] = _get_call_time_val ( _opt_price , strike , _eq_price ) return _out
Basic metrics for a specific option .
62,136
def strikes ( self , opttype , expiry ) : _relevant = _relevant_rows ( self . data , ( slice ( None ) , expiry , opttype , ) , "No key for {} {}" . format ( expiry , opttype ) ) _index = _relevant . index . get_level_values ( 'Strike' ) _columns = [ 'Price' , 'Time_Val' , 'Last' , 'Bid' , 'Ask' , 'Vol' , 'Open_Int' ] _df = pd . DataFrame ( index = _index , columns = _columns ) _underlying = _relevant . loc [ : , 'Underlying_Price' ] . values [ 0 ] _quotetime = pd . to_datetime ( _relevant . loc [ : , 'Quote_Time' ] . values [ 0 ] , utc = True ) . to_datetime ( ) for _col in _columns [ 2 : ] : _df . loc [ : , _col ] = _relevant . loc [ : , _col ] . values _df . loc [ : , 'Price' ] = ( _df . loc [ : , 'Bid' ] + _df . loc [ : , 'Ask' ] ) / 2. _set_tv_strike_ix ( _df , opttype , 'Price' , 'Time_Val' , _underlying ) return _df , _underlying , _quotetime
Retrieve option prices for all strikes of a given type with a given expiration .
62,137
def exps ( self , opttype , strike ) : _relevant = _relevant_rows ( self . data , ( strike , slice ( None ) , opttype , ) , "No key for {} {}" . format ( strike , opttype ) ) _index = _relevant . index . get_level_values ( 'Expiry' ) _columns = [ 'Price' , 'Time_Val' , 'Last' , 'Bid' , 'Ask' , 'Vol' , 'Open_Int' ] _df = pd . DataFrame ( index = _index , columns = _columns ) _eq = _relevant . loc [ : , 'Underlying_Price' ] . values [ 0 ] _qt = pd . to_datetime ( _relevant . loc [ : , 'Quote_Time' ] . values [ 0 ] , utc = True ) . to_datetime ( ) for _col in _columns [ 2 : ] : _df . loc [ : , _col ] = _relevant . loc [ : , _col ] . values _df . loc [ : , 'Price' ] = ( _df . loc [ : , 'Bid' ] + _df . loc [ : , 'Ask' ] ) / 2. _set_tv_other_ix ( _df , opttype , 'Price' , 'Time_Val' , _eq , strike ) return _df , _eq , _qt
Prices for given strike on all available dates .
62,138
def labeledfeatures(eqdata, featurefunc, labelfunc):
    """Return (features, labels) for the given equity data.

    *labelfunc* maps eqdata to ``(labels, rows_to_skip_at_end)``;
    *featurefunc* maps the end-trimmed data to
    ``(features, rows_to_skip_at_start)``.  The label frame is trimmed at
    the start by the same amount so features and labels align row-for-row.
    """
    n_rows = len(eqdata.index)
    labels, skipatend = labelfunc(eqdata)
    features, skipatstart = featurefunc(eqdata.iloc[:(n_rows - skipatend), :])
    return features, labels.iloc[skipatstart:, :]
Return features and labels for the given equity data .
62,139
def growth(interval, pricecol, eqdata):
    """Retrieve growth labels: price *interval* sessions ahead divided by
    the current price, indexed on the earlier session."""
    n_rows = len(eqdata.index)
    prices = eqdata.loc[:, pricecol].values
    ratios = prices[interval:] / prices[:(n_rows - interval)]
    return pd.DataFrame(data=ratios,
                        index=eqdata.index[:(n_rows - interval)],
                        columns=['Growth'], dtype='float64')
Retrieve growth labels .
62,140
def sma(eqdata, **kwargs):
    """Simple moving average.

    Keyword args: selection ('Adj Close'), window (20), outputcol ('SMA').
    """
    if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
        data = eqdata.loc[:, kwargs.get('selection', 'Adj Close')]
    else:
        data = eqdata
    window = kwargs.get('window', 20)
    outputcol = kwargs.get('outputcol', 'SMA')
    result = pd.DataFrame(index=data.index, columns=[outputcol],
                          dtype=np.float64)
    rolling_mean = data.rolling(window=window, center=False).mean()
    result.loc[:, outputcol] = rolling_mean.values.flatten()
    return result
simple moving average
62,141
def ema(eqdata, **kwargs):
    """Exponential moving average with the given span.

    Keyword args: selection ('Adj Close'), span (20), outputcol ('EMA').
    """
    if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
        data = eqdata.loc[:, kwargs.get('selection', 'Adj Close')]
    else:
        data = eqdata
    span = kwargs.get('span', 20)
    outputcol = kwargs.get('outputcol', 'EMA')
    result = pd.DataFrame(index=data.index, columns=[outputcol],
                          dtype=np.float64)
    ewm_mean = data.ewm(span=span, min_periods=0, adjust=True,
                        ignore_na=False).mean()
    result.loc[:, outputcol] = ewm_mean.values.flatten()
    return result
Exponential moving average with the given span .
62,142
def ema_growth ( eqdata , ** kwargs ) : _growth_outputcol = kwargs . get ( 'outputcol' , 'EMA Growth' ) _ema_outputcol = 'EMA' kwargs [ 'outputcol' ] = _ema_outputcol _emadf = ema ( eqdata , ** kwargs ) return simple . growth ( _emadf , selection = _ema_outputcol , outputcol = _growth_outputcol )
Growth of exponential moving average .
62,143
def growth_volatility ( eqdata , ** kwargs ) : _window = kwargs . get ( 'window' , 20 ) _selection = kwargs . get ( 'selection' , 'Adj Close' ) _outputcol = kwargs . get ( 'outputcol' , 'Growth Risk' ) _growthdata = simple . growth ( eqdata , selection = _selection ) return volatility ( _growthdata , outputcol = _outputcol , window = _window )
Return the volatility of growth .
62,144
def ratio_to_ave(window, eqdata, **kwargs):
    """Return values expressed as ratios to the average over the prior
    *window* sessions.

    Keyword args: selection ('Volume'), skipstartrows (0), skipendrows (0),
    outputcol ('Ratio to Ave').
    """
    _selection = kwargs.get('selection', 'Volume')
    _skipstartrows = kwargs.get('skipstartrows', 0)
    _skipendrows = kwargs.get('skipendrows', 0)
    _outputcol = kwargs.get('outputcol', 'Ratio to Ave')
    _size = len(eqdata.index)
    _eqdata = eqdata.loc[:, _selection]
    # Trailing rolling mean, excluding the final row(s) so each ratio uses
    # only sessions strictly before the row it describes.
    _sma = _eqdata.iloc[:-1 - _skipendrows].rolling(window=window,
                                                    center=False).mean().values
    # Each retained value divided by the mean of the `window` sessions
    # immediately preceding it.
    _outdata = _eqdata.values[window + _skipstartrows:_size - _skipendrows] / \
        _sma[window + _skipstartrows - 1:]
    _index = eqdata.index[window + _skipstartrows:_size - _skipendrows]
    return pd.DataFrame(_outdata, index=_index, columns=[_outputcol],
                        dtype=np.float64)
Return values expressed as ratios to the average over some number of prior sessions .
62,145
def run(features, labels, regularization=0., constfeat=True):
    """Run (optionally ridge-regularized) linear regression.

    Solves ``(X'X + rI) w = X'y`` by least squares and returns the
    coefficient vector.  When *constfeat* is true the first column is
    treated as the constant feature and excluded from regularization.
    """
    n_col = (features.shape[1] if len(features.shape) > 1 else 1)
    reg_matrix = regularization * np.identity(n_col, dtype='float64')
    if constfeat:
        reg_matrix[0, 0] = 0.
    # rcond=None opts in to the modern NumPy default and silences the
    # FutureWarning raised when rcond is left unspecified.
    return np.linalg.lstsq(features.T.dot(features) + reg_matrix,
                           features.T.dot(labels), rcond=None)[0]
Run linear regression on the given data .
62,146
def cal ( self , opttype , strike , exp1 , exp2 ) : assert pd . Timestamp ( exp1 ) < pd . Timestamp ( exp2 ) _row1 = _relevant_rows ( self . data , ( strike , exp1 , opttype , ) , "No key for {} strike {} {}" . format ( exp1 , strike , opttype ) ) _row2 = _relevant_rows ( self . data , ( strike , exp2 , opttype , ) , "No key for {} strike {} {}" . format ( exp2 , strike , opttype ) ) _price1 = _getprice ( _row1 ) _price2 = _getprice ( _row2 ) _eq = _row1 . loc [ : , 'Underlying_Price' ] . values [ 0 ] _qt = _row1 . loc [ : , 'Quote_Time' ] . values [ 0 ] _index = [ 'Near' , 'Far' , 'Debit' , 'Underlying_Price' , 'Quote_Time' ] _vals = np . array ( [ _price1 , _price2 , _price2 - _price1 , _eq , _qt ] ) return pd . DataFrame ( _vals , index = _index , columns = [ 'Value' ] )
Metrics for evaluating a calendar spread .
62,147
def expand(fn, col, inputtype=pd.DataFrame):
    """Wrap *fn*, a function of a single column, so the wrapper accepts a
    multi-column DataFrame or 2-D ndarray and applies *fn* to column *col*.

    For DataFrames an int *col* selects by position, anything else by
    label; for ndarrays *col* indexes the second axis.  Raises TypeError
    for other input types.
    """
    if inputtype == pd.DataFrame:
        if isinstance(col, int):
            def _by_position(*args, **kwargs):
                return fn(args[0].iloc[:, col], *args[1:], **kwargs)
            return _by_position

        def _by_label(*args, **kwargs):
            return fn(args[0].loc[:, col], *args[1:], **kwargs)
        return _by_label
    if inputtype == np.ndarray:
        def _by_index(*args, **kwargs):
            return fn(args[0][:, col], *args[1:], **kwargs)
        return _by_index
    raise TypeError("invalid input type")
Wrap a function applying to a single column to make a function applying to a multi - dimensional dataframe or ndarray
62,148
def has_na(eqdata):
    """Return True if *eqdata* (DataFrame or ndarray) contains any missing
    values, False otherwise."""
    values = eqdata.values if isinstance(eqdata, pd.DataFrame) else eqdata
    return bool(pd.isnull(values).any())
Return false if eqdata contains no missing values .
62,149
def add_const(features):
    """Prepend a constant feature of 1s and return the widened feature set.

    ndarray in -> ndarray out; DataFrame in -> DataFrame out with a
    'Constant' column first.
    """
    n_rows = features.shape[0]
    content = np.empty((n_rows, features.shape[1] + 1), dtype='float64')
    content[:, 0] = 1.
    if isinstance(features, np.ndarray):
        content[:, 1:] = features
        return content
    content[:, 1:] = features.values
    columns = ['Constant'] + list(features.columns)
    return pd.DataFrame(data=content, index=features.index,
                        columns=columns, dtype='float64')
Prepend the constant feature 1 as first feature and return the modified feature set .
62,150
def fromcols ( selection , n_sessions , eqdata , ** kwargs ) : _constfeat = kwargs . get ( 'constfeat' , True ) _outcols = [ 'Constant' ] if _constfeat else [ ] _n_rows = len ( eqdata . index ) for _col in selection : _outcols += map ( partial ( _concat , strval = ' ' + _col ) , range ( - n_sessions + 1 , 1 ) ) _features = pd . DataFrame ( index = eqdata . index [ n_sessions - 1 : ] , columns = _outcols , dtype = np . float64 ) _offset = 0 if _constfeat : _features . iloc [ : , 0 ] = 1. _offset += 1 for _col in selection : _values = eqdata . loc [ : , _col ] . values for i in range ( n_sessions ) : _features . iloc [ : , _offset + i ] = _values [ i : _n_rows - n_sessions + i + 1 ] _offset += n_sessions return _features
Generate features from selected columns of a dataframe .
62,151
def fromfuncs ( funcs , n_sessions , eqdata , ** kwargs ) : _skipatstart = kwargs . get ( 'skipatstart' , 0 ) _constfeat = kwargs . get ( 'constfeat' , True ) _outcols = [ 'Constant' ] if _constfeat else [ ] _n_allrows = len ( eqdata . index ) _n_featrows = _n_allrows - _skipatstart - n_sessions + 1 for _func in funcs : _outcols += map ( partial ( _concat , strval = ' ' + _func . title ) , range ( - n_sessions + 1 , 1 ) ) _features = pd . DataFrame ( index = eqdata . index [ _skipatstart + n_sessions - 1 : ] , columns = _outcols , dtype = np . float64 ) _offset = 0 if _constfeat : _features . iloc [ : , 0 ] = 1. _offset += 1 for _func in funcs : _values = _func ( eqdata ) . values _n_values = len ( _values ) for i in range ( n_sessions ) : _val_end = _n_values - n_sessions + i + 1 _features . iloc [ : , _offset + i ] = _values [ _val_end - _n_featrows : _val_end ] _offset += n_sessions return _features
Generate features using a list of functions to apply to input data
62,152
def ln_growth(eqdata, **kwargs):
    """Return the natural log of growth (default output column 'LnGrowth')."""
    kwargs.setdefault('outputcol', 'LnGrowth')
    return np.log(growth(eqdata, **kwargs))
Return the natural log of growth .
62,153
def mse(predicted, actual):
    """Mean squared error of predictions, averaged over axis 0."""
    err = predicted - actual
    return np.average(err ** 2, axis=0)
Mean squared error of predictions .
62,154
def get(eqprice, callprice, strike, shares=1, buycomm=0., excomm=0., dividend=0.):
    """
    Metrics for covered calls.

    Builds a single-column DataFrame of cost, profit, return and
    downside-protection figures for buying `shares` of stock at
    `eqprice` while selling calls at `callprice` with the given
    `strike`.

    Parameters
    ----------
    eqprice : float
        Price per share of the underlying.
    callprice : float
        Premium received per share for the call.
    strike : float
        Call strike price.
    shares : int or float, optional
        Number of shares (and option-covered shares). Defaults to 1.
    buycomm : float, optional
        Commission paid to open the position.
    excomm : float, optional
        Commission paid if the call is exercised.
    dividend : float, optional
        Expected dividend per share over the holding period.

    Returns
    -------
    pd.DataFrame
        Indexed by metric name, single column 'Value'.
    """
    _index = ['Eq Cost', 'Option Premium', 'Commission', 'Total Invested',
              'Dividends', 'Eq if Ex', 'Comm if Ex', 'Profit if Ex', 'Ret if Ex',
              'Profit if Unch', 'Ret if Unch', 'Break_Even Price',
              'Protection Pts', 'Protection Pct']
    _metrics = pd.DataFrame(index=_index, columns=['Value'])
    _shares = float(shares)
    _dividends = _shares * dividend
    _metrics.loc['Eq Cost', 'Value'] = _eqcost = _shares * eqprice
    _metrics.loc['Option Premium', 'Value'] = _optprem = _shares * callprice
    _metrics.loc['Commission', 'Value'] = float(buycomm)
    # Net outlay: stock cost less premium received, plus opening commission.
    _metrics.loc['Total Invested', 'Value'] = _invested = _eqcost - _optprem + buycomm
    _metrics.loc['Dividends', 'Value'] = _dividends
    _metrics.loc['Eq if Ex', 'Value'] = _eqsale = strike * _shares
    _metrics.loc['Comm if Ex', 'Value'] = float(excomm)
    _metrics.loc['Profit if Ex', 'Value'] = _profitex = _eqsale + _dividends - _invested - excomm
    _metrics.loc['Ret if Ex', 'Value'] = round(_profitex / _invested, _constants.NDIGITS_SIG)
    # "Unchanged" scenario: stock stays at `eqprice`, call expires worthless.
    _metrics.loc['Profit if Unch', 'Value'] = _profitunch = _eqcost + _dividends - _invested
    _metrics.loc['Ret if Unch', 'Value'] = round(_profitunch / _invested, _constants.NDIGITS_SIG)
    _metrics.loc['Break_Even Price', 'Value'] = _breakeven = round(
        (_invested - _dividends) / _shares, _constants.NDIGITS_SIG)
    # Downside protection: how far the stock can fall before a loss.
    _metrics.loc['Protection Pts', 'Value'] = _protpts = eqprice - _breakeven
    _metrics.loc['Protection Pct', 'Value'] = round(_protpts / eqprice, _constants.NDIGITS_SIG)
    return _metrics
Metrics for covered calls .
62,155
def is_bday(date, bday=None):
    """
    Return True iff the given date is a business day.

    A date is a business day when adding then subtracting one business
    day lands back on the same date. The default calendar excludes
    weekends and US federal holidays; pass `bday` to use another
    business-day offset.
    """
    session = Timestamp(date)
    offset = bday
    if offset is None:
        offset = CustomBusinessDay(calendar=USFederalHolidayCalendar())
    return session == (session + offset) - offset
Return true iff the given date is a business day .
62,156
def compare(eq_dfs, columns=None, selection='Adj Close'):
    """
    Get the relative performance of multiple equities.

    Each equity's `selection` column is normalized by its own first
    value, so every output column starts at 1.0. All frames are
    assumed to share the index of the first frame.
    """
    buf = np.empty((eq_dfs[0].shape[0], len(eq_dfs)), dtype=np.float64)
    rel_perf = pd.DataFrame(buf, eq_dfs[0].index, columns, dtype=np.float64)
    for col_ix, eq_df in enumerate(eq_dfs):
        rel_perf.iloc[:, col_ix] = eq_df.loc[:, selection] / eq_df.iloc[0].loc[selection]
    return rel_perf
Get the relative performance of multiple equities .
62,157
def diagbtrfly(self, lowstrike, midstrike, highstrike, expiry1, expiry2):
    """
    Metrics for evaluating a diagonal butterfly spread.

    The near leg is a short straddle (call + put) at `midstrike`
    expiring `expiry1`; the far leg is a long call at `highstrike` and
    long put at `lowstrike`, both expiring `expiry2`.

    Parameters
    ----------
    lowstrike, midstrike, highstrike : numbers
        Must satisfy lowstrike < midstrike < highstrike.
    expiry1, expiry2 : date-like
        Near and far expirations; expiry1 must precede expiry2.

    Returns
    -------
    pd.DataFrame
        Single 'Value' column with prices, totals, ratio and net credit.

    Raises
    ------
    AssertionError
        If the strike or expiry ordering constraints are violated.
    """
    assert lowstrike < midstrike
    assert midstrike < highstrike
    assert pd.Timestamp(expiry1) < pd.Timestamp(expiry2)
    _rows1 = {}
    _rows2 = {}
    _prices1 = {}
    _prices2 = {}
    _index = ['Straddle Call', 'Straddle Put', 'Straddle Total', 'Far Call',
              'Far Put', 'Far Total', 'Straddle to Far Ratio', 'Credit',
              'Underlying_Price', 'Quote_Time']
    _metrics = pd.DataFrame(index=_index, columns=['Value'])
    _errmsg = "No key for {} strike {} {}"
    # Call side: near straddle call at midstrike/expiry1, far call at
    # highstrike/expiry2. `_relevant_rows`/`_getprice`/`_getkeys` are
    # module-level helpers for extracting option-chain data.
    _opttype = 'call'
    _rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
                                      _errmsg.format(expiry1, midstrike, _opttype))
    _prices1[_opttype] = _getprice(_rows1[_opttype])
    _rows2[_opttype] = _relevant_rows(self.data, (highstrike, expiry2, _opttype),
                                      _errmsg.format(expiry2, highstrike, _opttype))
    _prices2[_opttype] = _getprice(_rows2[_opttype])
    _metrics.loc['Straddle Call', 'Value'] = _prices1[_opttype]
    _metrics.loc['Far Call', 'Value'] = _prices2[_opttype]
    _metrics.loc['Underlying_Price', 'Value'], _metrics.loc['Quote_Time', 'Value'] = \
        _getkeys(_rows1[_opttype], ['Underlying_Price', 'Quote_Time'])
    # Put side: near straddle put at midstrike/expiry1, far put at
    # lowstrike/expiry2.
    _opttype = 'put'
    _rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
                                      _errmsg.format(expiry1, midstrike, _opttype))
    _prices1[_opttype] = _getprice(_rows1[_opttype])
    _rows2[_opttype] = _relevant_rows(self.data, (lowstrike, expiry2, _opttype),
                                      _errmsg.format(expiry2, lowstrike, _opttype))
    _prices2[_opttype] = _getprice(_rows2[_opttype])
    _metrics.loc['Straddle Put', 'Value'] = _prices1[_opttype]
    _metrics.loc['Far Put', 'Value'] = _prices2[_opttype]
    # Aggregate legs: ratio and net credit of near straddle vs far pair.
    _metrics.loc['Straddle Total', 'Value'] = _neartot = sum(_prices1.values())
    _metrics.loc['Far Total', 'Value'] = _fartot = sum(_prices2.values())
    _metrics.loc['Straddle to Far Ratio', 'Value'] = _neartot / _fartot
    _metrics.loc['Credit', 'Value'] = _neartot - _fartot
    return _metrics
Metrics for evaluating a diagonal butterfly spread .
62,158
def info(self):
    """
    Show expiration dates, equity price and quote time.

    Prints a numbered list of expirations, the underlying price and
    the quote time, then returns `(self, self.exps())`.
    """
    print("Expirations:")
    for seq, expdate in enumerate(self.data.index.levels[1].to_pydatetime()):
        print("{:2d} {}".format(seq, expdate.strftime('%Y-%m-%d')))
    print("Stock: {:.2f}".format(self.data.iloc[0].loc['Underlying_Price']))
    print("Quote time: {}".format(self.quotetime().strftime('%Y-%m-%d %H:%M%z')))
    return self, self.exps()
Show expiration dates equity price quote time .
62,159
def tolist(self):
    """
    Return the array as a list of rows.

    Each row is converted with the module-level `_todict` helper,
    keyed by its index entry.
    """
    rows = self.data
    return [_todict(row_key, rows.loc[row_key, :]) for row_key in rows.index]
Return the array as a list of rows .
62,160
def _generate_username(self):
    """
    Generate a unique username.

    Draws random 30-character candidates (a UUID4 hex string with the
    last two characters dropped) until one is not already taken.
    """
    while True:
        # uuid4().hex == str(uuid4()) with dashes removed; trim to 30
        # characters.
        candidate = uuid.uuid4().hex[:-2]
        try:
            User.objects.get(username=candidate)
        except User.DoesNotExist:
            return candidate
Generate a unique username
62,161
def update_model_cache(table_name):
    """
    Update model cache by generating a new key for the model.

    Sharing the fresh key through the cache backend effectively
    invalidates all cached queries for `table_name`.
    """
    fresh_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
    model_cache_backend.share_model_cache_info(fresh_info)
Updates model cache by generating a new key for the model
62,162
def invalidate_model_cache(sender, instance, **kwargs):
    """
    Signal receiver for post_save/post_delete that invalidates the
    model cache of the sender and of all related models.

    Invalidation works by generating a new cache key for each affected
    table (see `update_model_cache`).

    Parameters
    ----------
    sender : model class
        The model that was saved or deleted.
    instance : model instance
        The saved/deleted instance (unused here; part of the signal
        signature).
    """
    logger.debug('Received post_save/post_delete signal from sender {0}'.format(sender))
    # Django >= 1.8 exposes relations via _meta.get_fields(); older
    # versions need get_all_related_objects() plus a scan of forward
    # RelatedFields.
    if django.VERSION >= (1, 8):
        related_tables = set(
            [f.related_model._meta.db_table for f in sender._meta.get_fields()
             if f.related_model is not None and
             (((f.one_to_many or f.one_to_one) and f.auto_created) or
              f.many_to_one or
              (f.many_to_many and not f.auto_created))])
    else:
        related_tables = set(
            [rel.model._meta.db_table
             for rel in sender._meta.get_all_related_objects()])
        related_tables |= set(
            [field.rel.to._meta.db_table for field in sender._meta.fields
             if issubclass(type(field), RelatedField)])
    logger.debug('Related tables of sender {0} are {1}'.format(sender, related_tables))
    update_model_cache(sender._meta.db_table)
    for related_table in related_tables:
        update_model_cache(related_table)
Signal receiver for models to invalidate model cache of sender and related models . Model cache is invalidated by generating new key for each model .
62,163
def invalidate_m2m_cache(sender, instance, model, **kwargs):
    """
    Signal receiver (m2m_changed) that invalidates the model cache for
    both sides of a many-to-many relationship.
    """
    logger.debug('Received m2m_changed signals from sender {0}'.format(sender))
    for affected_table in (instance._meta.db_table, model._meta.db_table):
        update_model_cache(affected_table)
Signal receiver for models to invalidate model cache for many - to - many relationship .
62,164
def generate_key(self):
    """
    Generate the cache key for the current query.

    The key is an MD5 digest of (model key + SQL + database alias), so
    rotating the model key invalidates every cached query for that
    model. If a new model key had to be created it is shared with
    other consumers through the cache backend.

    Returns
    -------
    str
        Hex MD5 digest identifying this exact query.
    """
    sql = self.sql()
    key, created = self.get_or_create_model_key()
    if created:
        # First consumer for this model: publish the key so other
        # processes/workers compute the same query keys.
        db_table = self.model._meta.db_table
        logger.debug('created new key {0} for model {1}'.format(key, db_table))
        model_cache_info = ModelCacheInfo(db_table, key)
        model_cache_backend.share_model_cache_info(model_cache_info)
    query_key = u'{model_key}{qs}{db}'.format(model_key=key, qs=sql, db=self.db)
    key = hashlib.md5(query_key.encode('utf-8')).hexdigest()
    return key
Generate cache key for the current query . If a new key is created for the model it is then shared with other consumers .
62,165
def sql(self):
    """
    Get the SQL for the current query, with parameters interpolated.
    """
    compiler = self.query.clone().get_compiler(using=self.db)
    raw_sql, params = compiler.as_sql()
    # NOTE(review): %-interpolation assumes the compiled SQL contains
    # no literal '%' characters outside placeholders — confirm for the
    # backends in use.
    return raw_sql % params
Get sql for the current query .
62,166
def get_or_create_model_key(self):
    """
    Get or create the cache key for this queryset's model.

    Returns
    -------
    (str, bool)
        The key and a flag that is True when a new key was created
        (i.e. no shared key existed yet).
    """
    shared_info = model_cache_backend.retrieve_model_cache_info(
        self.model._meta.db_table)
    if shared_info:
        return shared_info.table_key, False
    return uuid.uuid4().hex, True
Get or create key for the model .
62,167
def invalidate_model_cache(self):
    """
    Invalidate the model cache by generating new keys for this model
    and for every related model.

    See `update_model_cache` for the key-rotation mechanics.
    """
    logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
    # Django >= 1.8: relations come from _meta.get_fields(); older
    # versions use get_all_related_objects() plus forward RelatedFields.
    if django.VERSION >= (1, 8):
        related_tables = set(
            [f.related_model._meta.db_table for f in self.model._meta.get_fields()
             if ((f.one_to_many or f.one_to_one) and f.auto_created) or
             f.many_to_one or
             (f.many_to_many and not f.auto_created)])
    else:
        related_tables = set(
            [rel.model._meta.db_table
             for rel in self.model._meta.get_all_related_objects()])
        related_tables |= set(
            [field.rel.to._meta.db_table for field in self.model._meta.fields
             if issubclass(type(field), RelatedField)])
    logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
    update_model_cache(self.model._meta.db_table)
    for related_table in related_tables:
        update_model_cache(related_table)
Invalidate model cache by generating new key for the model .
62,168
def cache_backend(self):
    """
    Get the cache backend, resolving it lazily on first access and
    memoizing it on the instance.
    """
    if not hasattr(self, '_cache_backend'):
        # `caches` exists on Django >= 1.7; fall back to the legacy
        # get_cache() API otherwise.
        if hasattr(django.core.cache, 'caches'):
            backend = django.core.cache.caches[_cache_name]
        else:
            backend = django.core.cache.get_cache(_cache_name)
        self._cache_backend = backend
    return self._cache_backend
Get the cache backend
62,169
def import_file(filename):
    """
    Import a file that will trigger the population of Orca.

    The module is registered in ``sys.modules`` under its bare name,
    matching the behavior of the old ``imp.load_module`` call.

    Parameters
    ----------
    filename : str
        Path to a ``*.py`` file.
    """
    # `imp` was deprecated since Python 3.4 and removed in 3.12; use
    # importlib instead. Local imports keep the module's top-of-file
    # import block untouched.
    import importlib.util
    import sys

    pathname, fname = os.path.split(filename)
    modname = re.match(r'(?P<modname>\w+)\.py', fname).group('modname')
    spec = importlib.util.spec_from_file_location(
        modname, os.path.join(pathname, fname))
    module = importlib.util.module_from_spec(spec)
    # Register before executing, as imp.load_module did.
    sys.modules[modname] = module
    spec.loader.exec_module(module)
Import a file that will trigger the population of Orca .
62,170
def check_is_table(func):
    """
    Decorator: abort with 404 unless the wrapped view's `table_name`
    keyword argument names a registered Orca table.
    """
    @wraps(func)
    def _checked(**kwargs):
        if not orca.is_table(kwargs['table_name']):
            abort(404)
        return func(**kwargs)
    return _checked
Decorator that will check whether the table_name keyword argument to the wrapped function matches a registered Orca table .
62,171
def check_is_column(func):
    """
    Decorator: abort with 404 unless the wrapped view's `table_name`
    and `col_name` keyword arguments name a registered Orca table and
    one of its columns.
    """
    @wraps(func)
    def _checked(**kwargs):
        table, column = kwargs['table_name'], kwargs['col_name']
        if not orca.is_table(table):
            abort(404)
        if column not in orca.get_table(table).columns:
            abort(404)
        return func(**kwargs)
    return _checked
Decorator that will check whether the table_name and col_name keyword arguments to the wrapped function match a registered Orca table and column .
62,172
def check_is_injectable(func):
    """
    Decorator: abort with 404 unless the wrapped view's `inj_name`
    keyword argument names a registered Orca injectable.
    """
    @wraps(func)
    def _checked(**kwargs):
        if not orca.is_injectable(kwargs['inj_name']):
            abort(404)
        return func(**kwargs)
    return _checked
Decorator that will check whether the inj_name keyword argument to the wrapped function matches a registered Orca injectable .
62,173
def schema():
    """
    All tables, columns, steps, injectables and broadcasts registered
    with Orca, as JSON. Column lists include local columns on tables.
    """
    table_names = orca.list_tables()
    return jsonify(
        tables=table_names,
        columns={name: orca.get_table(name).columns for name in table_names},
        steps=orca.list_steps(),
        injectables=orca.list_injectables(),
        broadcasts=orca.list_broadcasts())
All tables columns steps injectables and broadcasts registered with Orca . Includes local columns on tables .
62,174
def table_preview(table_name):
    """
    Return the first five rows of a table as JSON, including all
    columns, in Pandas 'split' JSON format.
    """
    head = orca.get_table(table_name).to_frame().head()
    body = head.to_json(orient='split', date_format='iso')
    return body, 200, {'Content-Type': 'application/json'}
Returns the first five rows of a table as JSON . Includes all columns . Uses Pandas split JSON format .
62,175
def table_describe(table_name):
    """
    Return summary statistics of a table as JSON, including all
    columns, in Pandas 'split' JSON format.
    """
    stats = orca.get_table(table_name).to_frame().describe()
    body = stats.to_json(orient='split', date_format='iso')
    return body, 200, {'Content-Type': 'application/json'}
Return summary statistics of a table as JSON . Includes all columns . Uses Pandas split JSON format .
62,176
def table_definition(table_name):
    """
    Get the source of a table function as JSON.

    Wrapped DataFrames report only ``{"type": "dataframe"}``; function
    tables also report filename, line number, raw source and a
    Pygments-highlighted HTML rendering.
    """
    if orca.table_type(table_name) == 'dataframe':
        return jsonify(type='dataframe')
    filename, lineno, source = (
        orca.get_raw_table(table_name).func_source_data())
    return jsonify(type='function', filename=filename, lineno=lineno,
                   text=source,
                   html=highlight(source, PythonLexer(), HtmlFormatter()))
Get the source of a table function .
62,177
def table_groupbyagg(table_name):
    """
    Perform a groupby on a table and return an aggregation on a single
    column, as JSON in Pandas 'split' format.

    Query string parameters
    -----------------------
    column : str, required
        Column of `table_name` to aggregate.
    by : str, optional
        Column to group by. Exactly one of `by`/`level` must be given.
    level : str or int, optional
        Index level to group by.
    agg : str, required
        Aggregation name; must be a key of `_GROUPBY_AGG_MAP`.

    Responds 400 on any missing/invalid parameter combination.
    """
    table = orca.get_table(table_name)
    column = request.args.get('column', None)
    if not column or column not in table.columns:
        abort(400)
    by = request.args.get('by', None)
    level = request.args.get('level', None)
    # Exactly one grouping spec is allowed: neither or both is an error.
    if (not by and not level) or (by and level):
        abort(400)
    agg = request.args.get('agg', None)
    if not agg or agg not in _GROUPBY_AGG_MAP:
        abort(400)
    column = table.get_column(column)
    if level:
        # Level may name an index level or be its integer position;
        # fall back to the string if it doesn't parse as an int.
        try:
            level = int(level)
        except ValueError:
            pass
        gby = column.groupby(level=level)
    else:
        by = table.get_column(by)
        gby = column.groupby(by)
    result = _GROUPBY_AGG_MAP[agg](gby)
    return (result.to_json(orient='split', date_format='iso'),
            200,
            {'Content-Type': 'application/json'})
Perform a groupby on a table and return an aggregation on a single column .
62,178
def column_preview(table_name, col_name):
    """
    Return the first ten elements of a column as JSON in Pandas
    'split' format.
    """
    series = orca.get_table(table_name).get_column(col_name).head(10)
    body = series.to_json(orient='split', date_format='iso')
    return body, 200, {'Content-Type': 'application/json'}
Return the first ten elements of a column as JSON in Pandas split format .
62,179
def column_definition(table_name, col_name):
    """
    Get the source of a column function as JSON.

    Non-function columns report only their type; function columns also
    report filename, line number, raw source and highlighted HTML.
    """
    col_type = orca.get_table(table_name).column_type(col_name)
    if col_type != 'function':
        return jsonify(type=col_type)
    filename, lineno, source = (
        orca.get_raw_column(table_name, col_name).func_source_data())
    return jsonify(type='function', filename=filename, lineno=lineno,
                   text=source,
                   html=highlight(source, PythonLexer(), HtmlFormatter()))
Get the source of a column function .
62,180
def column_describe(table_name, col_name):
    """
    Return summary statistics of a column as JSON in Pandas 'split'
    format.
    """
    stats = orca.get_table(table_name).get_column(col_name).describe()
    return (stats.to_json(orient='split'),
            200,
            {'Content-Type': 'application/json'})
Return summary statistics of a column as JSON . Uses Pandas split JSON format .
62,181
def column_csv(table_name, col_name):
    """
    Return a column as CSV using Pandas' default CSV output.
    """
    # The `path` keyword was removed from Series.to_csv in modern
    # pandas; with no argument to_csv returns the CSV text directly,
    # which works on both old and new versions.
    csv = orca.get_table(table_name).get_column(col_name).to_csv()
    return csv, 200, {'Content-Type': 'text/csv'}
Return a column as CSV using Pandas default CSV output .
62,182
def injectable_repr(inj_name):
    """
    Return the type and repr of an injectable as JSON with keys
    'type' and 'repr'.
    """
    value = orca.get_injectable(inj_name)
    return jsonify(type=str(type(value)), repr=repr(value))
Returns the type and repr of an injectable . JSON response has type and repr keys .
62,183
def injectable_definition(inj_name):
    """
    Get the source of an injectable function as JSON.

    Plain variables report only ``{"type": "variable"}``; function
    injectables also report filename, line number, raw source and
    highlighted HTML.
    """
    if orca.injectable_type(inj_name) == 'variable':
        return jsonify(type='variable')
    filename, lineno, source = (
        orca.get_injectable_func_source_data(inj_name))
    return jsonify(type='function', filename=filename, lineno=lineno,
                   text=source,
                   html=highlight(source, PythonLexer(), HtmlFormatter()))
Get the source of an injectable function .
62,184
def list_broadcasts():
    """
    List all registered broadcasts as objects with keys 'cast' and
    'onto'.
    """
    casts = [{'cast': cast, 'onto': onto}
             for cast, onto in orca.list_broadcasts()]
    return jsonify(broadcasts=casts)
List all registered broadcasts as a list of objects with keys cast and onto .
62,185
def broadcast_definition(cast_name, onto_name):
    """
    Return the definition of a broadcast as JSON with keys cast, onto,
    cast_on, onto_on, cast_index and onto_index — the same as the
    arguments to the broadcast function. 404 if unregistered.
    """
    if not orca.is_broadcast(cast_name, onto_name):
        abort(404)
    bcast = orca.get_broadcast(cast_name, onto_name)
    return jsonify(cast=bcast.cast, onto=bcast.onto,
                   cast_on=bcast.cast_on, onto_on=bcast.onto_on,
                   cast_index=bcast.cast_index, onto_index=bcast.onto_index)
Return the definition of a broadcast as an object with keys cast onto cast_on onto_on cast_index and onto_index . These are the same as the arguments to the broadcast function .
62,186
def step_definition(step_name):
    """
    Get the source of a step function as JSON with keys filename,
    lineno, text (raw source) and html (Pygments-highlighted).
    404 if the step is not registered.
    """
    if not orca.is_step(step_name):
        abort(404)
    filename, lineno, source = orca.get_step(step_name).func_source_data()
    return jsonify(filename=filename, lineno=lineno, text=source,
                   html=highlight(source, PythonLexer(), HtmlFormatter()))
Get the source of a step function . Returned object has keys filename lineno text and html . text is the raw text of the function html has been marked up by Pygments .
62,187
def _add_log_handler(handler, level=None, fmt=None, datefmt=None,
                     propagate=None):
    """
    Add a logging handler to the 'orca' logger.

    Parameters
    ----------
    handler : logging.Handler
    level : int, optional
        If given, set on the handler.
    fmt, datefmt : str, optional
        Format strings; module defaults are used when falsy.
    propagate : bool, optional
        If given, set on the 'orca' logger.
    """
    handler.setFormatter(logging.Formatter(fmt=fmt or US_LOG_FMT,
                                           datefmt=datefmt or US_LOG_DATE_FMT))
    if level is not None:
        handler.setLevel(level)
    orca_logger = logging.getLogger('orca')
    orca_logger.addHandler(handler)
    if propagate is not None:
        orca_logger.propagate = propagate
Add a logging handler to Orca .
62,188
def log_to_stream(level=None, fmt=None, datefmt=None):
    """
    Send log messages to the console.

    Parameters
    ----------
    level : int, optional
        Logging level for the handler (e.g. ``logging.DEBUG``).
    fmt, datefmt : str, optional
        Format strings; module defaults are used when omitted.
    """
    # Bug fix: `level` was previously accepted but never forwarded, so
    # callers could not actually set the handler level.
    _add_log_handler(logging.StreamHandler(), level=level, fmt=fmt,
                     datefmt=datefmt, propagate=False)
Send log messages to the console .
62,189
def clear_all():
    """
    Clear any and all stored state from Orca: every registry, every
    cache, and every memoized function's cache.
    """
    registries = (_TABLES, _COLUMNS, _STEPS, _BROADCASTS, _INJECTABLES,
                  _TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE)
    for registry in registries:
        registry.clear()
    for item in _MEMOIZED.values():
        item.value.clear_cached()
    _MEMOIZED.clear()
    logger.debug('pipeline state cleared')
Clear any and all stored state from Orca .
62,190
def _collect_variables(names, expressions=None):
    """
    Map labels and expressions to registered variables.

    The first ``len(names) - len(expressions)`` names map to
    themselves; the remaining names map pairwise to `expressions`.
    Expressions containing a '.' are treated as 'table.column'
    references; anything else is looked up among injectables and
    tables, with wrapped functions called to produce their value.

    Parameters
    ----------
    names : sequence of str
        Labels for the returned dict.
    expressions : sequence of str, optional
        Expressions for the trailing names.

    Returns
    -------
    dict
        Label -> resolved variable/column/value.
    """
    if not expressions:
        expressions = []
    offset = len(names) - len(expressions)
    # Zip leading names with themselves and trailing names with the
    # given expressions, producing one label->expression mapping.
    labels_map = dict(tz.concatv(
        tz.compatibility.zip(names[:offset], names[:offset]),
        tz.compatibility.zip(names[offset:], expressions)))
    all_variables = tz.merge(_INJECTABLES, _TABLES)
    variables = {}
    for label, expression in labels_map.items():
        if '.' in expression:
            # 'table.column' syntax resolves to a table column.
            table_name, column_name = expression.split('.')
            table = get_table(table_name)
            variables[label] = table.get_column(column_name)
        else:
            thing = all_variables[expression]
            if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
                # Wrapped functions are evaluated to their current value.
                variables[label] = thing()
            else:
                variables[label] = thing
    return variables
Map labels and expressions to registered variables .
62,191
def add_table(table_name, table, cache=False, cache_scope=_CS_FOREVER,
              copy_col=True):
    """
    Register a table with Orca.

    Callables are wrapped as TableFuncWrapper (with caching options);
    DataFrames as DataFrameWrapper. Any stale cache for the name is
    cleared before registration.
    """
    if isinstance(table, Callable):
        wrapped = TableFuncWrapper(table_name, table, cache=cache,
                                   cache_scope=cache_scope, copy_col=copy_col)
    else:
        wrapped = DataFrameWrapper(table_name, table, copy_col=copy_col)
    wrapped.clear_cached()
    logger.debug('registering table {!r}'.format(table_name))
    _TABLES[table_name] = wrapped
    return wrapped
Register a table with Orca .
62,192
def table(table_name=None, cache=False, cache_scope=_CS_FOREVER,
          copy_col=True):
    """
    Decorator for functions that return DataFrames; registers the
    function as an Orca table under `table_name` or, if omitted, the
    function's own name.
    """
    def decorator(func):
        add_table(table_name or func.__name__, func, cache=cache,
                  cache_scope=cache_scope, copy_col=copy_col)
        return func
    return decorator
Decorates functions that return DataFrames .
62,193
def get_table(table_name):
    """
    Get a registered table, evaluating function-backed tables to their
    DataFrameWrapper result.
    """
    wrapped = get_raw_table(table_name)
    return wrapped() if isinstance(wrapped, TableFuncWrapper) else wrapped
Get a registered table .
62,194
def table_type(table_name):
    """
    Return the type of a registered table: 'dataframe' or 'function'.
    """
    wrapped = get_raw_table(table_name)
    if isinstance(wrapped, DataFrameWrapper):
        return 'dataframe'
    if isinstance(wrapped, TableFuncWrapper):
        return 'function'
Returns the type of a registered table .
62,195
def add_column(table_name, column_name, column, cache=False,
               cache_scope=_CS_FOREVER):
    """
    Add a new column to a table from a Series or callable.

    Callables are wrapped as _ColumnFuncWrapper (with caching
    options); Series as _SeriesWrapper. Any stale cache for the column
    is cleared before registration.
    """
    if isinstance(column, Callable):
        wrapped = _ColumnFuncWrapper(table_name, column_name, column,
                                     cache=cache, cache_scope=cache_scope)
    else:
        wrapped = _SeriesWrapper(table_name, column_name, column)
    wrapped.clear_cached()
    logger.debug('registering column {!r} on table {!r}'.format(
        column_name, table_name))
    _COLUMNS[(table_name, column_name)] = wrapped
    return wrapped
Add a new column to a table from a Series or callable .
62,196
def column(table_name, column_name=None, cache=False,
           cache_scope=_CS_FOREVER):
    """
    Decorator for functions that return a Series; registers the
    function as a column on `table_name` under `column_name` or, if
    omitted, the function's own name.
    """
    def decorator(func):
        add_column(table_name, column_name or func.__name__, func,
                   cache=cache, cache_scope=cache_scope)
        return func
    return decorator
Decorates functions that return a Series .
62,197
def _columns_for_table(table_name):
    """
    Return all columns registered for a table, keyed by column name.
    """
    return {col_name: wrapper
            for (tbl, col_name), wrapper in _COLUMNS.items()
            if tbl == table_name}
Return all of the columns registered for a given table .
62,198
def get_raw_column(table_name, column_name):
    """
    Get a wrapped registered column.

    Raises
    ------
    KeyError
        If no such column is registered for the table.
    """
    key = (table_name, column_name)
    if key not in _COLUMNS:
        raise KeyError('column {!r} not found for table {!r}'.format(
            column_name, table_name))
    return _COLUMNS[key]
Get a wrapped registered column .
62,199
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
    """
    Wrap a function for memoization and tie its cache into the Orca
    caching system.

    The wrapper is registered in the module-level `_MEMOIZED` registry
    under `name` so `clear_all` / cache-scope machinery can clear it.
    Cached values are only RETURNED while the global `_CACHING` flag
    is on, but results are always stored.

    Parameters
    ----------
    f : callable
        Function whose arguments must all be hashable.
    name : str
        Registry key for the memoized wrapper.
    cache_scope : optional
        Scope passed to the CacheItem record.

    Returns
    -------
    callable
        The wrapped function, with `cache`, `clear_cached` and
        `__wrapped__` attributes attached.
    """
    cache = {}

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            # None stands in for "no positional/keyword args" so that
            # a zero-argument call still produces a hashable key.
            cache_key = (
                args or None,
                frozenset(kwargs.items()) if kwargs else None)
            in_cache = cache_key in cache
        except TypeError:
            raise TypeError('function arguments must be hashable for memoization')
        if _CACHING and in_cache:
            return cache[cache_key]
        else:
            result = f(*args, **kwargs)
            cache[cache_key] = result
            return result

    wrapper.__wrapped__ = f
    wrapper.cache = cache
    wrapper.clear_cached = lambda: cache.clear()
    _MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)
    return wrapper
Wraps a function for memoization and ties it s cache into the Orca cacheing system .