idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
11,000
def get_balance(self):
    """Get the account balance from the SMSGlobal provider.

    Returns a dict mapping lowercased field names to values parsed from
    the provider's ``name:value;name:value;`` response format.

    Raises:
        Exception: if SMSGLOBAL_CHECK_BALANCE_COUNTRY is unset or the
            provider responds with an ``ERROR`` payload.
    """
    if not SMSGLOBAL_CHECK_BALANCE_COUNTRY:
        raise Exception('SMSGLOBAL_CHECK_BALANCE_COUNTRY setting must be set to check balance.')
    params = {
        'user': self.get_username(),
        'password': self.get_password(),
        'country': SMSGLOBAL_CHECK_BALANCE_COUNTRY,
    }
    req = urllib2.Request(SMSGLOBAL_API_URL_CHECKBALANCE, urllib.urlencode(params))
    response = urllib2.urlopen(req).read()
    if response.startswith('ERROR'):
        raise Exception('Error retrieving balance: %s' % response.replace('ERROR:', ''))
    # Split each "name:value" pair only on the FIRST ':' so values that
    # themselves contain a colon are preserved (the original split the
    # pair twice and truncated such values at the second colon).
    pairs = [p.split(':', 1) for p in response.split(';') if len(p) > 0]
    return dict((name.lower(), value) for name, value in pairs)
Get balance with provider .
11,001
def send_messages(self, sms_messages):
    """Send each SmsMessage and return the number sent successfully."""
    if not sms_messages:
        return
    # Count only the messages the backend reports as sent (truthy _send).
    return sum(1 for message in sms_messages if self._send(message))
Sends one or more SmsMessage objects and returns the number of sms messages sent .
11,002
def send_sms(body, from_phone, to, flash=False, fail_silently=False,
             auth_user=None, auth_password=None, connection=None):
    """Easy wrapper for sending a single SMS to a recipient list.

    Builds (or reuses) a backend connection and sends one SmsMessage.
    Returns whatever the message's send() reports.
    """
    # Local import — presumably to avoid a circular import at module
    # load time; confirm against the sendsms package layout.
    from sendsms.message import SmsMessage
    connection = connection or get_connection(
        username=auth_user,
        password=auth_password,
        fail_silently=fail_silently)
    return SmsMessage(
        body=body, from_phone=from_phone, to=to, flash=flash,
        connection=connection).send()
Easy wrapper for sending a single SMS to a recipient list.
11,003
def open(self):
    """Initialize the sms.sluzba.cz API client from Django settings.

    Missing settings fall back to defaults: empty credentials, a
    2-second timeout, and SSL enabled.
    """
    self.client = SmsGateApi(
        getattr(settings, 'SMS_SLUZBA_API_LOGIN', ''),
        getattr(settings, 'SMS_SLUZBA_API_PASSWORD', ''),
        getattr(settings, 'SMS_SLUZBA_API_TIMEOUT', 2),
        getattr(settings, 'SMS_SLUZBA_API_USE_SSL', True))
Initializes sms . sluzba . cz API library .
11,004
def send_messages(self, messages):
    """Send SMS messages via the sms.sluzba.cz API.

    Returns the number of (message, recipient) sends that succeeded.
    Failures are logged and skipped when self.fail_silently is set,
    otherwise re-raised.
    """
    count = 0
    for message in messages:
        # Strip accents via NFKD decomposition and drop any remaining
        # non-ASCII characters before handing the body to the gateway.
        # NOTE(review): uses the Python 2 `unicode` builtin.
        message_body = unicodedata.normalize(
            'NFKD', unicode(message.body)).encode('ascii', 'ignore')
        for tel_number in message.to:
            try:
                self.client.send(
                    tel_number, message_body,
                    getattr(settings, 'SMS_SLUZBA_API_USE_POST', True))
            except Exception:
                if self.fail_silently:
                    log.exception('Error while sending sms via sms.sluzba.cz backend API.')
                else:
                    raise
            else:
                count += 1
    return count
Sending SMS messages via sms . sluzba . cz API .
11,005
def _send(self, message):
    """Send a single message through the Esendex HTTP API.

    Returns True on success; on failure returns False when
    self.fail_silently is set, otherwise raises Exception.
    """
    params = {
        'EsendexUsername': self.get_username(),
        'EsendexPassword': self.get_password(),
        'EsendexAccount': self.get_account(),
        'EsendexOriginator': message.from_phone,
        'EsendexRecipient': ",".join(message.to),
        'EsendexBody': message.body,
        'EsendexPlainText': '1'}
    if ESENDEX_SANDBOX:
        # Sandbox/test mode flag for the API.
        params['EsendexTest'] = '1'
    response = requests.post(ESENDEX_API_URL, params)
    if response.status_code != 200:
        if not self.fail_silently:
            raise Exception('Bad status code')
        else:
            return False
    # A well-formed reply body always starts with b'Result'.
    if not response.content.startswith(b'Result'):
        if not self.fail_silently:
            raise Exception('Bad result')
        else:
            return False
    response = self._parse_response(response.content.decode('utf8'))
    if ESENDEX_SANDBOX and response['Result'] == 'Test':
        return True
    else:
        if response['Result'].startswith('OK'):
            return True
        else:
            if not self.fail_silently:
                raise Exception('Bad result')
            return False
Private method to send one message .
11,006
def send(self, fail_silently=False):
    """Send this SMS message.

    Returns the number of messages sent (0 when there are no
    recipients). Emits the sms_post_send signal after the send.
    """
    if not self.to:
        # No recipients: don't bother opening a connection.
        return 0
    res = self.get_connection(fail_silently).send_messages([self])
    sms_post_send.send(sender=self, to=self.to,
                       from_phone=self.from_phone, body=self.body)
    return res
Sends the sms message
11,007
def _send(self, message):
    """POST a single message to the Nexmo API and parse the response.

    Returns whatever self.parse() produces for the API response.
    """
    params = {
        'from': message.from_phone,
        'to': ",".join(message.to),
        'text': message.body,
        'api_key': self.get_api_key(),
        'api_secret': self.get_api_secret(),
    }
    # Debug `print(params)` removed: it wrote the API secret to stdout
    # on every send. The logger call below already records the request
    # at DEBUG level for troubleshooting.
    logger.debug("POST to %r with body: %r", NEXMO_API_URL, params)
    return self.parse(NEXMO_API_URL, requests.post(NEXMO_API_URL, data=params))
A helper method that does the actual sending
11,008
def get_market_summary(self, market):
    """Get the last-24-hour summary for a specific market."""
    return self._api_query(
        path_dict={
            API_V1_1: '/public/getmarketsummary',
            API_V2_0: '/pub/Market/GetMarketSummary'
        },
        # v1.1 expects 'market', v2.0 expects 'marketname'; send both.
        options={'market': market, 'marketname': market},
        protection=PROTECTION_PUB)
Used to get the last 24 hour summary of all active exchanges in specific coin
11,009
def get_orderbook(self, market, depth_type=BOTH_ORDERBOOK):
    """Retrieve the orderbook for a given market.

    `depth_type` selects buy, sell, or both sides of the book.
    """
    return self._api_query(
        path_dict={
            API_V1_1: '/public/getorderbook',
            API_V2_0: '/pub/Market/GetMarketOrderBook'
        },
        options={'market': market, 'marketname': market, 'type': depth_type},
        protection=PROTECTION_PUB)
Used to retrieve the orderbook for a given market.
11,010
def get_market_history(self, market):
    """Retrieve the latest trades that occurred for a specific market.

    Only a v1.1 endpoint is provided for this call.
    """
    return self._api_query(
        path_dict={
            API_V1_1: '/public/getmarkethistory',
        },
        options={'market': market, 'marketname': market},
        protection=PROTECTION_PUB)
Used to retrieve the latest trades that have occurred for a specific market .
11,011
def buy_limit(self, market, quantity, rate):
    """Place a limit buy order in a specific market.

    Requires API keys with trading permissions (private endpoint).
    """
    return self._api_query(
        path_dict={
            API_V1_1: '/market/buylimit',
        },
        options={'market': market, 'quantity': quantity, 'rate': rate},
        protection=PROTECTION_PRV)
Used to place a buy order in a specific market . Use buylimit to place limit orders Make sure you have the proper permissions set on your API keys for this call to work
11,012
def get_open_orders(self, market=None):
    """Get all currently open orders, optionally limited to one market."""
    return self._api_query(
        path_dict={
            API_V1_1: '/market/getopenorders',
            API_V2_0: '/key/market/getopenorders'
        },
        # Omit options entirely when no market filter was requested.
        options={'market': market, 'marketname': market} if market else None,
        protection=PROTECTION_PRV)
Get all orders that you currently have opened . A specific market can be requested .
11,013
def get_deposit_address(self, currency):
    """Generate or retrieve a deposit address for a specific currency."""
    return self._api_query(
        path_dict={
            API_V1_1: '/account/getdepositaddress',
            API_V2_0: '/key/balance/getdepositaddress'
        },
        options={'currency': currency, 'currencyname': currency},
        protection=PROTECTION_PRV)
Used to generate or retrieve an address for a specific currency
11,014
def withdraw(self, currency, quantity, address, paymentid=None):
    """Withdraw funds from your account.

    The optional `paymentid` is forwarded only when provided.
    """
    options = {'currency': currency, 'quantity': quantity, 'address': address}
    if paymentid:
        options['paymentid'] = paymentid
    return self._api_query(
        path_dict={
            API_V1_1: '/account/withdraw',
            API_V2_0: '/key/balance/withdrawcurrency'
        },
        options=options,
        protection=PROTECTION_PRV)
Used to withdraw funds from your account
11,015
def get_order_history(self, market=None):
    """Retrieve the account's order trade history, optionally per market."""
    if market:
        return self._api_query(
            path_dict={
                API_V1_1: '/account/getorderhistory',
                API_V2_0: '/key/market/GetOrderHistory'
            },
            options={'market': market, 'marketname': market},
            protection=PROTECTION_PRV)
    else:
        # The all-markets variant lives at a different v2.0 endpoint.
        return self._api_query(
            path_dict={
                API_V1_1: '/account/getorderhistory',
                API_V2_0: '/key/orders/getorderhistory'
            },
            protection=PROTECTION_PRV)
Used to retrieve order trade history of account
11,016
def get_order(self, uuid):
    """Get details of a single buy or sell order by its UUID."""
    return self._api_query(
        path_dict={
            API_V1_1: '/account/getorder',
            API_V2_0: '/key/orders/getorder'
        },
        # v1.1 expects 'uuid', v2.0 expects 'orderid'; send both.
        options={'uuid': uuid, 'orderid': uuid},
        protection=PROTECTION_PRV)
Used to get details of buy or sell order
11,017
def list_markets_by_currency(self, currency):
    """Helper to list all market names ending with the given currency."""
    suffix = currency.lower()
    markets = self.get_markets()['result']
    return [entry['MarketName'] for entry in markets
            if entry['MarketName'].lower().endswith(suffix)]
Helper function to see which markets exist for a currency .
11,018
def get_pending_withdrawals(self, currency=None):
    """View your pending withdrawals, optionally filtered by currency.

    v2.0-only endpoint.
    """
    return self._api_query(
        path_dict={API_V2_0: '/key/balance/getpendingwithdrawals'},
        options={'currencyname': currency} if currency else None,
        protection=PROTECTION_PRV)
Used to view your pending withdrawals
11,019
def get_candles(self, market, tick_interval):
    """Get all tick candles for a market at the given interval.

    v2.0-only endpoint.
    """
    return self._api_query(
        path_dict={API_V2_0: '/pub/market/GetTicks'},
        options={'marketName': market, 'tickInterval': tick_interval},
        protection=PROTECTION_PUB)
Used to get all tick candles for a market .
11,020
def changelist_view(self, request, extra_context=None):
    """Add the advanced_filters form to the changelist context.

    If the advanced-filters handler returns a response (e.g. a redirect
    after a form submission) it short-circuits the normal changelist.
    """
    if extra_context is None:
        extra_context = {}
    response = self.adv_filters_handle(request, extra_context=extra_context)
    if response:
        return response
    return super(AdminAdvancedFiltersMixin, self).changelist_view(
        request, extra_context=extra_context)
Add advanced_filters form to changelist context
11,021
def query(self):
    """De-serialize, decode and return the ORM query stored in b64_query.

    Returns None when no serialized query is stored.
    """
    if not self.b64_query:
        return None
    serializer = QSerializer(base64=True)
    return serializer.loads(self.b64_query)
De - serialize decode and return an ORM query stored in b64_query .
11,022
def query(self, value):
    """Serialize an ORM query, base64-encode it, and store in b64_query.

    Raises:
        Exception: if value is not a Django Q object.
    """
    if not isinstance(value, Q):
        raise Exception('Must only be passed a Django (Q)uery object')
    s = QSerializer(base64=True)
    self.b64_query = s.dumps(value)
Serialize an ORM query Base - 64 encode it and set it to the b64_query field
11,023
def _build_field_choices(self, fields):
    """Build form field choices from a model-field mapping.

    Sorts (query_path, capitalized_name) pairs case-insensitively by
    display name, then appends the static FIELD_CHOICES.
    """
    return tuple(sorted(
        [(fquery, capfirst(fname)) for fquery, fname in fields.items()],
        key=lambda f: f[1].lower()
    )) + self.FIELD_CHOICES
Iterate over passed model fields tuple and update initial choices .
11,024
def _parse_query_dict(query_data, model):
    """Normalize one serialized query-field dict for form initialization.

    Splits a "field__operator" lookup into separate field/operator
    parts, validates the field path against the model, and infers the
    operator from the stored value (isnull/istrue/isfalse, date range,
    or the default 'iexact').
    """
    operator = 'iexact'
    if query_data['field'] == '_OR':
        # OR separator rows carry no real field; just set the operator.
        query_data['operator'] = operator
        return query_data
    parts = query_data['field'].split('__')
    if len(parts) < 2:
        field = parts[0]
    else:
        if parts[-1] in dict(AdvancedFilterQueryForm.OPERATORS).keys():
            # Trailing part is a known operator (e.g. "name__icontains").
            field = '__'.join(parts[:-1])
            operator = parts[-1]
        else:
            # Trailing part is a related-field traversal; keep as is.
            field = query_data['field']
    query_data['field'] = field
    mfield = get_fields_from_path(model, query_data['field'])
    if not mfield:
        # NOTE(review): these extra args are never %-formatted into the
        # message; they end up as additional Exception args.
        raise Exception('Field path "%s" could not be followed to a field'
                        ' in model %s', query_data['field'], model)
    else:
        # The last entry is the field instance at the end of the path.
        mfield = mfield[-1]
    if query_data['value'] is None:
        query_data['operator'] = "isnull"
    elif query_data['value'] is True:
        query_data['operator'] = "istrue"
    elif query_data['value'] is False:
        query_data['operator'] = "isfalse"
    else:
        if isinstance(mfield, DateField):
            # Date fields are edited as a from/to range in the form.
            query_data['operator'] = "range"
        else:
            query_data['operator'] = operator
    if isinstance(query_data.get('value'), list) and \
            query_data['operator'] == 'range':
        date_from = date_to_string(query_data.get('value_from'))
        date_to = date_to_string(query_data.get('value_to'))
        query_data['value'] = ','.join([date_from, date_to])
    return query_data
Take a list of query field dict and return data for form initialization
11,025
def set_range_value(self, data):
    """Validate a date range pair and pack it into data['value'].

    Records an error and raises forms.ValidationError when both ends
    of the range are missing.
    """
    lower = data.pop('value_from')
    upper = data.pop('value_to')
    if lower is None and upper is None:
        self.errors['value'] = ['Date range requires values']
        raise forms.ValidationError([])
    data['value'] = (lower, upper)
Validates date range by parsing into 2 datetime objects and validating them both .
11,026
def make_query(self, *args, **kwargs):
    """Return a Q object built from this form's cleaned data.

    The query is negated when the 'negate' flag is set.
    """
    query = Q()
    query_dict = self._build_query_dict(self.cleaned_data)
    if 'negate' in self.cleaned_data and self.cleaned_data['negate']:
        query = query & ~Q(**query_dict)
    else:
        query = query & Q(**query_dict)
    return query
Returns a Q object from the submitted form
11,027
def generate_query(self):
    """Reduce the formset's individual queries into one usable Q object.

    '_OR' rows act as separators: the runs of AND-ed conditions between
    them are OR-ed together at the end.
    """
    query = Q()
    ORed = []
    for form in self._non_deleted_forms:
        if not hasattr(form, 'cleaned_data'):
            # Skip forms without validated data.
            continue
        if form.cleaned_data['field'] == "_OR":
            ORed.append(query)
            query = Q()
        else:
            query = query & form.make_query()
    if ORed:
        if query:
            # Include the trailing run after the last '_OR'.
            ORed.append(query)
        query = reduce(operator.or_, ORed)
    return query
Reduces multiple queries into a single usable query
11,028
def initialize_form(self, instance, model, data=None, extra=None):
    """Build the query-fields formset from a saved filter instance.

    When an instance is given, its stored fields are parsed back into
    initial form data; `extra` selects the formset class that allows
    blank extra forms.
    """
    model_fields = self.get_fields_from_model(model, self._filter_fields)
    forms = []
    if instance:
        for field_data in instance.list_fields():
            forms.append(
                AdvancedFilterQueryForm._parse_query_dict(field_data, model))
    formset = AFQFormSetNoExtra if not extra else AFQFormSet
    self.fields_formset = formset(
        data=data,
        initial=forms or None,
        model_fields=model_fields)
Takes a finalized query and generates its form data.
11,029
def login():
    """Log the user in via CAS.

    This route has two purposes: the user visits it to start a login,
    and the CAS server redirects back to it with a ticket after a
    successful login.
    """
    cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY']
    redirect_url = create_cas_login_url(
        current_app.config['CAS_SERVER'],
        current_app.config['CAS_LOGIN_ROUTE'],
        flask.url_for(
            '.login',
            origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'),
            _external=True))
    if 'ticket' in flask.request.args:
        # CAS redirected back with a ticket: stash it for validation.
        flask.session[cas_token_session_key] = flask.request.args['ticket']
    if cas_token_session_key in flask.session:
        if validate(flask.session[cas_token_session_key]):
            if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session:
                redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL')
            elif flask.request.args.get('origin'):
                redirect_url = flask.request.args['origin']
            else:
                redirect_url = flask.url_for(current_app.config['CAS_AFTER_LOGIN'])
        else:
            # Invalid ticket: drop it so the user is sent to CAS again.
            del flask.session[cas_token_session_key]
    current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
    return flask.redirect(redirect_url)
This route has two purposes . First it is used by the user to login . Second it is used by the CAS to respond with the ticket after the user logs in successfully .
11,030
def logout():
    """Log the user out: clear CAS session keys and redirect to CAS logout."""
    cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
    cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']
    if cas_username_session_key in flask.session:
        del flask.session[cas_username_session_key]
    if cas_attributes_session_key in flask.session:
        del flask.session[cas_attributes_session_key]
    if (current_app.config['CAS_AFTER_LOGOUT'] is not None):
        # CAS will redirect to this URL after completing its own logout.
        redirect_url = create_cas_logout_url(
            current_app.config['CAS_SERVER'],
            current_app.config['CAS_LOGOUT_ROUTE'],
            current_app.config['CAS_AFTER_LOGOUT'])
    else:
        redirect_url = create_cas_logout_url(
            current_app.config['CAS_SERVER'],
            current_app.config['CAS_LOGOUT_ROUTE'])
    current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
    return flask.redirect(redirect_url)
When the user accesses this route they are logged out .
11,031
def validate(ticket):
    """Validate a CAS ticket against the CAS server.

    On success, returns True and stores the validated username (and
    attributes, if present) in the session under the configured keys;
    returns False on validation failure.
    """
    cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
    cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']
    current_app.logger.debug("validating token {0}".format(ticket))
    cas_validate_url = create_cas_validate_url(
        current_app.config['CAS_SERVER'],
        current_app.config['CAS_VALIDATE_ROUTE'],
        flask.url_for(
            '.login',
            origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'),
            _external=True),
        ticket)
    current_app.logger.debug("Making GET request to {0}".format(cas_validate_url))
    xml_from_dict = {}
    isValid = False
    try:
        xmldump = urlopen(cas_validate_url).read().strip().decode('utf8', 'ignore')
        xml_from_dict = parse(xmldump)
        isValid = True \
            if "cas:authenticationSuccess" in xml_from_dict["cas:serviceResponse"] \
            else False
    except ValueError:
        current_app.logger.error("CAS returned unexpected result")
    if isValid:
        current_app.logger.debug("valid")
        xml_from_dict = xml_from_dict["cas:serviceResponse"]["cas:authenticationSuccess"]
        username = xml_from_dict["cas:user"]
        flask.session[cas_username_session_key] = username
        if "cas:attributes" in xml_from_dict:
            attributes = xml_from_dict["cas:attributes"]
            if "cas:memberOf" in attributes:
                # memberOf arrives as a "[a, b, c]" string; turn it into
                # a list of trimmed group names.
                attributes["cas:memberOf"] = \
                    attributes["cas:memberOf"].lstrip('[').rstrip(']').split(',')
                for group_number in range(0, len(attributes['cas:memberOf'])):
                    attributes['cas:memberOf'][group_number] = \
                        attributes['cas:memberOf'][group_number].lstrip(' ').rstrip(' ')
            flask.session[cas_attributes_session_key] = attributes
    else:
        current_app.logger.debug("invalid")
    return isValid
Will attempt to validate the ticket. If validation fails then False is returned. If validation is successful then True is returned and the validated username is saved in the session under the key CAS_USERNAME_SESSION_KEY while the validated attributes dictionary is saved under the key CAS_ATTRIBUTES_SESSION_KEY.
11,032
def create_url(base, path=None, *query):
    """Build a URL from a base, an optional quoted path, and query pairs.

    Pairs whose value is None are dropped from the query string.
    """
    url = base if path is None else urljoin(base, quote(path))
    kept = [(name, value) for name, value in query if value is not None]
    return urljoin(url, '?{0}'.format(urlencode(kept)))
Create a url .
11,033
def create_cas_login_url(cas_url, cas_route, service, renew=None, gateway=None):
    """Create a CAS login URL with optional renew/gateway parameters."""
    pairs = (
        ('service', service),
        ('renew', renew),
        ('gateway', gateway),
    )
    return create_url(cas_url, cas_route, *pairs)
Create a CAS login URL .
11,034
def create_cas_validate_url(cas_url, cas_route, service, ticket, renew=None):
    """Create a CAS ticket-validation URL."""
    pairs = (
        ('service', service),
        ('ticket', ticket),
        ('renew', renew),
    )
    return create_url(cas_url, cas_route, *pairs)
Create a CAS validate URL .
11,035
def namespace_to_dict(obj):
    """Return vars(obj) for argparse/optparse namespaces, else obj unchanged."""
    is_namespace = isinstance(obj, (argparse.Namespace, optparse.Values))
    return vars(obj) if is_namespace else obj
If obj is argparse.Namespace or optparse.Values we'll return a dict representation of it; otherwise return the original object.
11,036
def xdg_config_dirs():
    """Return config paths from the XDG_CONFIG_HOME and XDG_CONFIG_DIRS
    environment variables, falling back to /etc/xdg, plus /etc."""
    paths = []
    home = os.environ.get('XDG_CONFIG_HOME')
    if home is not None:
        paths.append(home)
    dirs = os.environ.get('XDG_CONFIG_DIRS')
    if dirs is not None:
        paths.extend(dirs.split(':'))
    else:
        paths.append('/etc/xdg')
    paths.append('/etc')
    return paths
Returns a list of paths taken from the XDG_CONFIG_DIRS and XDG_CONFIG_HOME environment variables if they exist.
11,037
def config_dirs():
    """Return a platform-specific, de-duplicated list of candidate user
    configuration directories."""
    paths = []
    if platform.system() == 'Darwin':
        paths.append(MAC_DIR)
        paths.append(UNIX_DIR_FALLBACK)
        paths.extend(xdg_config_dirs())
    elif platform.system() == 'Windows':
        paths.append(WINDOWS_DIR_FALLBACK)
        if WINDOWS_DIR_VAR in os.environ:
            paths.append(os.environ[WINDOWS_DIR_VAR])
    else:
        # Assume a Unix-like system.
        paths.append(UNIX_DIR_FALLBACK)
        paths.extend(xdg_config_dirs())
    # Expand ~ and deduplicate while preserving order.
    out = []
    for path in paths:
        path = os.path.abspath(os.path.expanduser(path))
        if path not in out:
            out.append(path)
    return out
Return a platform - specific list of candidates for user configuration directories on the system .
11,038
def load_yaml(filename):
    """Read a YAML document from a file.

    Raises ConfigReadError if the file cannot be read or parsed.
    """
    try:
        with open(filename, 'rb') as f:
            return yaml.load(f, Loader=Loader)
    except (IOError, yaml.error.YAMLError) as exc:
        raise ConfigReadError(filename, exc)
Read a YAML document from a file . If the file cannot be read or parsed a ConfigReadError is raised .
11,039
def as_template(value):
    """Convert a simple shorthand Python value to a Template.

    Instances become templates with that value as default; type objects
    become required type-checking templates; sets become Choice and
    lists become OneOf alternatives.
    """
    if isinstance(value, Template):
        return value
    elif isinstance(value, abc.Mapping):
        return MappingTemplate(value)
    elif value is int:
        # The bare type means "an integer, no default".
        return Integer()
    elif isinstance(value, int):
        return Integer(value)
    elif isinstance(value, type) and issubclass(value, BASESTRING):
        return String()
    elif isinstance(value, BASESTRING):
        return String(value)
    elif isinstance(value, set):
        # A set of allowed values.
        return Choice(list(value))
    elif (SUPPORTS_ENUM and isinstance(value, type)
            and issubclass(value, enum.Enum)):
        return Choice(value)
    elif isinstance(value, list):
        # A list of alternative templates.
        return OneOf(value)
    elif value is float:
        return Number()
    elif value is None:
        return Template()
    elif value is dict:
        return TypeTemplate(abc.Mapping)
    elif value is list:
        return TypeTemplate(abc.Sequence)
    elif isinstance(value, type):
        return TypeTemplate(value)
    else:
        raise ValueError(u'cannot convert to template: {0!r}'.format(value))
Convert a simple shorthand Python value to a Template .
11,040
def of(cls, value):
    """Coerce a dict to a ConfigSource, passing existing sources through.

    Raises TypeError for any other type.
    """
    if isinstance(value, ConfigSource):
        return value
    if isinstance(value, dict):
        return ConfigSource(value)
    raise TypeError(u'source value must be a dict')
Given either a dictionary or a ConfigSource object return a ConfigSource object . This lets a function accept either type of object as an argument .
11,041
def _build_namespace_dict(cls, obj, dots=False):
    """Recursively convert argparse/optparse namespaces to plain dicts.

    Keys with None values are dropped. When `dots` is set, dotted key
    names ("a.b.c") are split into nested dictionaries.
    """
    obj = namespace_to_dict(obj)
    if not isinstance(obj, dict):
        # Leaf value: return as is.
        return obj
    keys = obj.keys() if PY3 else obj.iterkeys()
    if dots:
        # Sort so parent keys are processed before dotted children.
        keys = sorted(list(keys))
    output = {}
    for key in keys:
        value = obj[key]
        if value is None:
            # Drop unset arguments.
            continue
        save_to = output
        result = cls._build_namespace_dict(value, dots)
        if dots:
            split = key.split('.')
            if len(split) > 1:
                # Walk/create nested dicts for each dotted parent key.
                key = split.pop()
                for child_key in split:
                    if child_key in save_to and isinstance(save_to[child_key], dict):
                        save_to = save_to[child_key]
                    else:
                        save_to[child_key] = {}
                        save_to = save_to[child_key]
        if key in save_to:
            # Merge into an existing nested dict.
            save_to[key].update(result)
        else:
            save_to[key] = result
    return output
Recursively replaces all argparse . Namespace and optparse . Values with dicts and drops any keys with None values .
11,042
def set_args(self, namespace, dots=False):
    """Overlay parsed command-line arguments onto this view's value.

    `namespace` may be an argparse.Namespace or optparse.Values; `dots`
    enables splitting dotted argument names into nested dicts.
    """
    self.set(self._build_namespace_dict(namespace, dots))
Overlay parsed command - line arguments generated by a library like argparse or optparse onto this view s value .
11,043
def flatten(self, redact=False):
    """Create a hierarchy of OrderedDicts with this view's data,
    recursively reifying subviews into concrete values.

    Redacted views are replaced with REDACTED_TOMBSTONE when `redact`
    is set.
    """
    od = OrderedDict()
    for key, view in self.items():
        if redact and view.redact:
            od[key] = REDACTED_TOMBSTONE
        else:
            try:
                od[key] = view.flatten(redact=redact)
            except ConfigTypeError:
                # Not a mapping: take the scalar value instead.
                od[key] = view.get()
    return od
Create a hierarchy of OrderedDicts containing the data from this view recursively reifying all views to get their represented values .
11,044
def represent_bool(self, data):
    """Represent bool as 'yes'/'no' instead of 'true'/'false'."""
    text = u'yes' if data else u'no'
    return self.represent_scalar('tag:yaml.org,2002:bool', text)
Represent bool as yes or no instead of true or false .
11,045
def _add_default_source(self):
    """Add the package's default configuration settings as a source.

    Looks for the default YAML file inside the package for `modname`,
    if a module name was given and its package path is known.
    """
    if self.modname:
        if self._package_path:
            filename = os.path.join(self._package_path, DEFAULT_FILENAME)
            if os.path.isfile(filename):
                # The trailing True marks this source as a default.
                self.add(ConfigSource(load_yaml(filename), filename, True))
Add the package's default configuration settings. This looks for a YAML file located inside the package for the module modname, if it was given.
11,046
def read(self, user=True, defaults=True):
    """Find and read configuration files and set them as sources.

    Set `user` or `defaults` to False to skip discovered user config
    files or the in-package defaults, respectively.
    """
    if user:
        self._add_user_source()
    if defaults:
        self._add_default_source()
Find and read the files for this configuration and set them as the sources for this configuration . To disable either discovered user configuration files or the in - package defaults set user or defaults to False .
11,047
def set_file(self, filename):
    """Parse `filename` as YAML and insert it as the highest-priority
    configuration source."""
    filename = os.path.abspath(filename)
    self.set(ConfigSource(load_yaml(filename), filename))
Parses the file as YAML and inserts it into the configuration sources with highest priority .
11,048
def dump(self, full=True, redact=False):
    """Dump the Configuration object to a YAML string.

    With `full` False, only non-default sources are included. Comments
    from the default source file are restored into the output.
    """
    if full:
        out_dict = self.flatten(redact=redact)
    else:
        # Exclude default sources when flattening.
        sources = [s for s in self.sources if not s.default]
        temp_root = RootView(sources)
        temp_root.redactions = self.redactions
        out_dict = temp_root.flatten(redact=redact)
    yaml_out = yaml.dump(out_dict, Dumper=Dumper,
                         default_flow_style=None, indent=4, width=1000)
    # Restore comments to the YAML text from the default source file.
    default_source = None
    for source in self.sources:
        if source.default:
            default_source = source
            break
    if default_source and default_source.filename:
        with open(default_source.filename, 'rb') as fp:
            default_data = fp.read()
        yaml_out = restore_yaml_comments(yaml_out, default_data.decode('utf-8'))
    return yaml_out
Dump the Configuration object to a YAML file .
11,049
def clear(self):
    """Remove all sources (and pending lazy sources) from this
    configuration."""
    super(LazyConfig, self).clear()
    self._lazy_suffix = []
    self._lazy_prefix = []
Remove all sources from this configuration .
11,050
def value(self, view, template=None):
    """Get the value for a ConfigView.

    Returns the converted first value when the view exists, otherwise
    the template's default; raises NotFoundError when a value is
    required (default is REQUIRED).
    """
    if view.exists():
        value, _ = view.first()
        return self.convert(value, view)
    elif self.default is REQUIRED:
        # A required value with no default: error out.
        raise NotFoundError(u"{0} not found".format(view.name))
    else:
        return self.default
Get the value for a ConfigView .
11,051
def fail(self, message, view, type_error=False):
    """Raise ConfigTypeError or ConfigValueError for a bad value."""
    if type_error:
        exc_class = ConfigTypeError
    else:
        exc_class = ConfigValueError
    raise exc_class(u'{0}: {1}'.format(view.name, message))
Raise an exception indicating that a value cannot be accepted .
11,052
def convert(self, value, view):
    """Check that the value is an integer; floats are truncated via int()."""
    if isinstance(value, int):
        return value
    if isinstance(value, float):
        return int(value)
    self.fail(u'must be a number', view, True)
Check that the value is an integer . Floats are rounded .
11,053
def convert(self, value, view):
    """Check that the value is numeric (an int or a float)."""
    if isinstance(value, NUMERIC_TYPES):
        return value
    self.fail(
        u'must be numeric, not {0}'.format(type(value).__name__),
        view, True)
Check that the value is an int or a float .
11,054
def value(self, view, template=None):
    """Get an AttrDict of validated values, one per subtemplate key."""
    result = AttrDict()
    for key, subtemplate in self.subtemplates.items():
        result[key] = subtemplate.value(view[key], self)
    return result
Get a dict with the same keys as the template and values validated according to the value types .
11,055
def value(self, view, template=None):
    """Get a list of items, each validated against the subtemplate."""
    return [self.subtemplate.value(item, self) for item in view]
Get a list of items validated against the template .
11,056
def convert(self, value, view):
    """Check that the value is a string matching self.pattern (if any)."""
    if isinstance(value, BASESTRING):
        if self.pattern and not self.regex.match(value):
            self.fail(
                u"must match the pattern {0}".format(self.pattern),
                view)
        return value
    else:
        self.fail(u'must be a string', view, True)
Check that the value is a string and matches the pattern .
11,057
def convert(self, value, view):
    """Ensure that the value matches at least one allowed template.

    Tries each candidate in order and returns the first successful
    conversion; fails the view when none accept the value.
    """
    is_mapping = isinstance(self.template, MappingTemplate)
    for candidate in self.allowed:
        try:
            if is_mapping:
                if isinstance(candidate, Filename) and candidate.relative_to:
                    # Relative filenames need the enclosing mapping
                    # rebuilt with their sibling dependencies included.
                    next_template = candidate.template_with_relatives(
                        view, self.template)
                    next_template.subtemplates[view.key] = as_template(candidate)
                else:
                    next_template = MappingTemplate({view.key: candidate})
                return view.parent.get(next_template)[view.key]
            else:
                return view.get(candidate)
        except ConfigTemplateError:
            # A malformed template is a programming error: propagate.
            raise
        except ConfigError:
            # This candidate rejected the value; try the next one.
            pass
        except ValueError as exc:
            raise ConfigTemplateError(exc)
    self.fail(
        u'must be one of {0}, not {1}'.format(repr(self.allowed), repr(value)),
        view)
Ensure that the value follows at least one template .
11,058
def export_live_eggs(self, env=False):
    """Add all of the eggs in the current environment to PYTHONPATH.

    Fixes two defects in the original: the path list was joined with a
    hard-coded ':' (wrong separator on Windows — use os.pathsep), and it
    iterated a set, producing a nondeterministic PYTHONPATH order. The
    order is now sys.path order with egg_base appended last.
    """
    path_eggs = [p for p in sys.path if p.endswith('.egg')]
    command = self.get_finalized_command("egg_info")
    egg_base = path.abspath(command.egg_base)
    # De-duplicate while preserving order (dict preserves insertion order).
    unique_path_eggs = list(dict.fromkeys(path_eggs + [egg_base]))
    os.environ['PYTHONPATH'] = os.pathsep.join(unique_path_eggs)
Adds all of the eggs in the current environment to PYTHONPATH .
11,059
def get_from_environment():
    """Return the JavaScript runtime named by the EXECJS_RUNTIME env var.

    Returns None when the variable is empty, missing, or names an
    unavailable runtime.
    """
    name = os.environ.get("EXECJS_RUNTIME")
    if not name:
        return None
    try:
        runtime = _find_runtime_by_name(name)
    except exceptions.RuntimeUnavailableError:
        return None
    return runtime
Return the JavaScript runtime that is specified in EXECJS_RUNTIME environment variable . If EXECJS_RUNTIME environment variable is empty or invalid return None .
11,060
def split_table_cells(self, row):
    """Iterate over the cells of a '|'-delimited table row, yielding
    (cell_text, start_column) pairs with 1-based column positions.

    Backslash escaping is honored: an escaped 'n' becomes a newline,
    and escaped pipes/backslashes are taken literally.
    """
    row = iter(row)
    col = 0
    start_col = col + 1
    cell = ''
    first_cell = True
    while True:
        char = next(row, None)
        col += 1
        if char == '|':
            if first_cell:
                # Text before the first '|' is not a cell; discard it.
                first_cell = False
            else:
                yield (cell, start_col)
            cell = ''
            start_col = col + 1
        elif char == '\\':
            # Escape sequence: consume and interpret the next character.
            char = next(row)
            col += 1
            if char == 'n':
                cell += '\n'
            else:
                if char not in ['|', '\\']:
                    # Unknown escape: keep the backslash literally.
                    cell += '\\'
                cell += char
        elif char:
            cell += char
        else:
            # next() returned None: end of row.
            break
An iterator returning all the table cells in a row with their positions accounting for escaping .
11,061
def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
    """Resample data to a different frequency.

    "cumulative" series are spread evenly over fine-grained atomic
    periods and then summed back up to `freq`; "instantaneous" series
    are forward-filled and averaged.

    Raises ValueError if data_series is not a pandas Series.
    """
    if not isinstance(data_series, pd.Series):
        raise ValueError(
            "expected series, got object with class {}".format(
                data_series.__class__))
    if data_series.empty:
        return data_series
    series = remove_duplicates(data_series)
    target_freq = pd.Timedelta(atomic_freq)
    # Duration of each period; the final period's length is unknown (NaT).
    timedeltas = (series.index[1:] - series.index[:-1]).append(
        pd.TimedeltaIndex([pd.NaT]))
    if series_type == "cumulative":
        # Spread each value evenly across its period before resampling.
        spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
        series_spread = series * spread_factor
        atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
        resampled = atomic_series.resample(freq).sum()
        # sum() turns all-NaN periods into 0; use mean() to find those
        # periods and mask them back to NaN.
        resampled_with_nans = atomic_series.resample(freq).mean()
        resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)
    elif series_type == "instantaneous":
        atomic_series = series.asfreq(atomic_freq, method="ffill")
        resampled = atomic_series.resample(freq).mean()
    if resampled.index[-1] < series.index[-1]:
        # Extend by one period so the resampled index covers all data.
        last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
        resampled = (
            pd.concat([resampled, pd.Series(np.nan, index=last_index)])
            .resample(freq)
            .mean())
    return resampled
Resample data to a different frequency .
11,062
def get_baseline_data(data, start=None, end=None, max_days=365,
                      allow_billing_period_overshoot=False,
                      ignore_billing_period_gap_for_day_count=False,):
    """Filter down to baseline period data.

    Selects up to `max_days` of data ending at `end` (or the data's
    end), optionally snapping the start backwards to the nearest index
    value. Returns a (baseline_data, warnings) tuple.

    Raises:
        ValueError: if both start and max_days are given.
        NoBaselineDataError: if no non-NaN data remains after filtering.
    """
    if max_days is not None:
        if start is not None:
            raise ValueError(
                "If max_days is set, start cannot be set: start={}, max_days={}.".format(
                    start, max_days))
    start_inf = False
    if start is None:
        # Unbounded start: use the earliest representable timestamp.
        start_target = pytz.UTC.localize(pd.Timestamp.min)
        start_inf = True
    else:
        start_target = start
    end_inf = False
    if end is None:
        end_limit = pytz.UTC.localize(pd.Timestamp.max)
        end_inf = True
    else:
        end_limit = end
    data_before_end_limit = data[:end_limit].copy()
    if ignore_billing_period_gap_for_day_count:
        # Count days back from the last available data point rather
        # than from the requested end.
        end_limit = data_before_end_limit.index.max()
    if not end_inf and max_days is not None:
        start_target = end_limit - timedelta(days=max_days)
    if allow_billing_period_overshoot:
        # Snap the start to the nearest existing index value.
        try:
            loc = data_before_end_limit.index.get_loc(start_target, method="nearest")
        except (KeyError, IndexError):
            baseline_data = data_before_end_limit
            start_limit = start_target
        else:
            start_limit = data_before_end_limit.index[loc]
            baseline_data = data_before_end_limit[start_limit:].copy()
    else:
        start_limit = start_target
        baseline_data = data_before_end_limit[start_limit:].copy()
    if baseline_data.dropna().empty:
        raise NoBaselineDataError()
    # Blank out the final value: it marks the period's end boundary.
    baseline_data.iloc[-1] = np.nan
    data_end = data.index.max()
    data_start = data.index.min()
    return (
        baseline_data,
        _make_baseline_warnings(
            end_inf, start_inf, data_start, data_end, start_limit, end_limit),
    )
Filter down to baseline period data .
11,063
def get_reporting_data(
    data,
    start=None,
    end=None,
    max_days=365,
    allow_billing_period_overshoot=False,
    ignore_billing_period_gap_for_day_count=False,
):
    """Filter down to reporting period data.

    Mirror image of ``get_baseline_data``: the reporting period is anchored
    at ``start`` and extends forward up to ``max_days`` or ``end``.

    Parameters
    ----------
    data : pandas.Series or pandas.DataFrame
        Time-indexed data from which to extract the reporting period.
    start : datetime, optional
        Reporting start (typically an intervention date). Unbounded if None.
    end : datetime, optional
        Explicit reporting end; mutually exclusive with ``max_days``.
    max_days : int, optional
        Maximum number of days of data after the start to keep.
    allow_billing_period_overshoot : bool, optional
        If True, snap the computed end to the nearest existing index value,
        which may fall later than the computed end target.
    ignore_billing_period_gap_for_day_count : bool, optional
        If True, count ``max_days`` forward from the first data point at or
        after ``start`` rather than from ``start`` itself.

    Returns
    -------
    tuple
        ``(reporting_data, warnings)`` where ``warnings`` comes from
        ``_make_reporting_warnings``.

    Raises
    ------
    ValueError
        If both ``end`` and ``max_days`` are given.
    NoReportingDataError
        If no non-NaN data remains after filtering.
    """
    if max_days is not None:
        if end is not None:
            raise ValueError(
                "If max_days is set, end cannot be set: end={}, max_days={}.".format(
                    end, max_days
                )
            )
    # Track whether each end of the period was unbounded, for the warnings.
    start_inf = False
    if start is None:
        start_limit = pytz.UTC.localize(pd.Timestamp.min)
        start_inf = True
    else:
        start_limit = start
    end_inf = False
    if end is None:
        end_target = pytz.UTC.localize(pd.Timestamp.max)
        end_inf = True
    else:
        end_target = end
    data_after_start_limit = data[start_limit:].copy()
    if ignore_billing_period_gap_for_day_count:
        # Count forward from the first available data point instead of `start`.
        start_limit = data_after_start_limit.index.min()
    if not start_inf and max_days is not None:
        end_target = start_limit + timedelta(days=max_days)
    if allow_billing_period_overshoot:
        # Snap to the nearest existing index value (may be after the target).
        try:
            loc = data_after_start_limit.index.get_loc(end_target, method="nearest")
        except (KeyError, IndexError):
            # No usable index value; keep everything after the start limit.
            reporting_data = data_after_start_limit
            end_limit = end_target
        else:
            end_limit = data_after_start_limit.index[loc]
            reporting_data = data_after_start_limit[:end_limit].copy()
    else:
        end_limit = end_target
        reporting_data = data_after_start_limit[:end_limit].copy()
    if reporting_data.dropna().empty:
        raise NoReportingDataError()
    # The final row marks the end of the last period rather than a full
    # period of usage, so blank out its value.
    reporting_data.iloc[-1] = np.nan
    data_end = data.index.max()
    data_start = data.index.min()
    return (
        reporting_data,
        _make_reporting_warnings(
            end_inf, start_inf, data_start, data_end, start_limit, end_limit
        ),
    )
Filter down to reporting period data .
11,064
def modeled_savings(
    baseline_model,
    reporting_model,
    result_index,
    temperature_data,
    with_disaggregated=False,
    confidence_level=0.90,
    predict_kwargs=None,
):
    """Compute modeled savings, i.e., savings in which both the baseline and
    reporting usage values are model predictions over the same index. This is
    appropriate for annualizing or weather-normalizing models.

    Parameters
    ----------
    baseline_model : object
        Fitted baseline model with a ``predict`` method (e.g.,
        CalTRACKUsagePerDayModelResults).
    reporting_model : object
        Fitted reporting model with a ``predict`` method.
    result_index : pandas.DatetimeIndex
        Index over which to compute predictions and savings.
    temperature_data : pandas.Series
        Temperature data covering the result index.
    with_disaggregated : bool, optional
        If True (usage_per_day models only), also compute base/heating/cooling
        load savings columns.
    confidence_level : float, optional
        Confidence level used for the error bands.
    predict_kwargs : dict, optional
        Extra keyword arguments forwarded to each model's ``predict``.

    Returns
    -------
    tuple
        ``(results, error_bands)`` — a DataFrame of modeled usage/savings and
        error bands (None unless a usage_per_day model).
    """
    prediction_index = result_index
    if predict_kwargs is None:
        predict_kwargs = {}
    # Only usage_per_day models support disaggregation and error bands.
    model_type = None
    if isinstance(baseline_model, CalTRACKUsagePerDayModelResults):
        model_type = "usage_per_day"
    if model_type == "usage_per_day" and with_disaggregated:
        predict_kwargs["with_disaggregated"] = True

    def _predicted_usage(model):
        # Run the model's prediction over the shared index/temperatures.
        model_prediction = model.predict(
            prediction_index, temperature_data, **predict_kwargs
        )
        predicted_usage = model_prediction.result
        return predicted_usage

    predicted_baseline_usage = _predicted_usage(baseline_model)
    predicted_reporting_usage = _predicted_usage(reporting_model)
    modeled_baseline_usage = predicted_baseline_usage["predicted_usage"].to_frame(
        "modeled_baseline_usage"
    )
    modeled_reporting_usage = predicted_reporting_usage["predicted_usage"].to_frame(
        "modeled_reporting_usage"
    )

    def modeled_savings_func(row):
        # Savings = baseline prediction minus reporting prediction.
        return row.modeled_baseline_usage - row.modeled_reporting_usage

    results = modeled_baseline_usage.join(modeled_reporting_usage).assign(
        modeled_savings=modeled_savings_func
    )
    if model_type == "usage_per_day" and with_disaggregated:
        # Rename disaggregated load columns so baseline and reporting values
        # can coexist in a single frame.
        modeled_baseline_usage_disaggregated = predicted_baseline_usage[
            ["base_load", "heating_load", "cooling_load"]
        ].rename(
            columns={
                "base_load": "modeled_baseline_base_load",
                "heating_load": "modeled_baseline_heating_load",
                "cooling_load": "modeled_baseline_cooling_load",
            }
        )
        modeled_reporting_usage_disaggregated = predicted_reporting_usage[
            ["base_load", "heating_load", "cooling_load"]
        ].rename(
            columns={
                "base_load": "modeled_reporting_base_load",
                "heating_load": "modeled_reporting_heating_load",
                "cooling_load": "modeled_reporting_cooling_load",
            }
        )

        def modeled_base_load_savings_func(row):
            return row.modeled_baseline_base_load - row.modeled_reporting_base_load

        def modeled_heating_load_savings_func(row):
            return (
                row.modeled_baseline_heating_load - row.modeled_reporting_heating_load
            )

        def modeled_cooling_load_savings_func(row):
            return (
                row.modeled_baseline_cooling_load - row.modeled_reporting_cooling_load
            )

        results = (
            results.join(modeled_baseline_usage_disaggregated)
            .join(modeled_reporting_usage_disaggregated)
            .assign(
                modeled_base_load_savings=modeled_base_load_savings_func,
                modeled_heating_load_savings=modeled_heating_load_savings_func,
                modeled_cooling_load_savings=modeled_cooling_load_savings_func,
            )
        )
    # Drop rows with any NaN, then reindex to keep the original index shape.
    results = results.dropna().reindex(results.index)
    error_bands = None
    if model_type == "usage_per_day":
        error_bands = _compute_error_bands_modeled_savings(
            baseline_model.totals_metrics,
            reporting_model.totals_metrics,
            results,
            baseline_model.interval,
            reporting_model.interval,
            confidence_level,
        )
    return results, error_bands
Compute modeled savings, i.e., savings in which baseline and reporting usage values are based on models. This is appropriate for annualizing or weather-normalizing models.
11,065
def _caltrack_predict_design_matrix(
    model_type,
    model_params,
    data,
    disaggregated=False,
    input_averages=False,
    output_averages=False,
):
    """An internal CalTRACK predict method for use with a design matrix of
    the form used in model fitting.

    Computes base, heating, and cooling loads per period from fitted model
    parameters and a design matrix containing degree-day columns.

    Parameters
    ----------
    model_type : str
        One of ``'intercept_only'``, ``'hdd_only'``, ``'cdd_only'``,
        ``'cdd_hdd'``.
    model_params : dict
        Fitted parameters (intercept, betas, balance points).
    data : pandas.DataFrame
        Design matrix with degree-day columns (e.g., ``hdd_65``) and either
        a DatetimeIndex or an ``n_days`` column.
    disaggregated : bool, optional
        If True, return a DataFrame with base/heating/cooling load columns;
        otherwise return their sum as a Series.
    input_averages : bool, optional
        Whether degree-day inputs are per-day averages (vs period totals).
    output_averages : bool, optional
        Whether outputs should be per-day averages (vs period totals).

    Returns
    -------
    pandas.Series or pandas.DataFrame

    Raises
    ------
    ValueError
        If ``model_type`` is None or no period-length information is
        available.
    UnrecognizedModelTypeError
        If ``model_type`` is not a recognized CalTRACK model type.
    """
    zeros = pd.Series(0, index=data.index)
    ones = zeros + 1
    # Period lengths come either from the index or an explicit n_days column.
    if isinstance(data.index, pd.DatetimeIndex):
        days_per_period = day_counts(data.index)
    else:
        try:
            days_per_period = data["n_days"]
        except KeyError:
            raise ValueError("Data needs DatetimeIndex or an n_days column.")
    if model_type in ["intercept_only", "hdd_only", "cdd_only", "cdd_hdd"]:
        intercept = _get_parameter_or_raise(model_type, model_params, "intercept")
        # The intercept is a per-day rate; scale by period length for totals.
        if output_averages:
            base_load = intercept * ones
        else:
            base_load = intercept * days_per_period
    elif model_type is None:
        raise ValueError("Model not valid for prediction: model_type=None")
    else:
        raise UnrecognizedModelTypeError(
            "invalid caltrack model type: {}".format(model_type)
        )
    if model_type in ["hdd_only", "cdd_hdd"]:
        beta_hdd = _get_parameter_or_raise(model_type, model_params, "beta_hdd")
        heating_balance_point = _get_parameter_or_raise(
            model_type, model_params, "heating_balance_point"
        )
        hdd_column_name = "hdd_%s" % heating_balance_point
        hdd = data[hdd_column_name]
        # Convert between per-day-average and period-total conventions.
        if input_averages and not output_averages:
            heating_load = hdd * beta_hdd * days_per_period
        elif input_averages and output_averages:
            heating_load = hdd * beta_hdd
        elif not input_averages and not output_averages:
            heating_load = hdd * beta_hdd
        else:
            heating_load = hdd * beta_hdd / days_per_period
    else:
        heating_load = zeros
    if model_type in ["cdd_only", "cdd_hdd"]:
        beta_cdd = _get_parameter_or_raise(model_type, model_params, "beta_cdd")
        cooling_balance_point = _get_parameter_or_raise(
            model_type, model_params, "cooling_balance_point"
        )
        cdd_column_name = "cdd_%s" % cooling_balance_point
        cdd = data[cdd_column_name]
        # Convert between per-day-average and period-total conventions.
        if input_averages and not output_averages:
            cooling_load = cdd * beta_cdd * days_per_period
        elif input_averages and output_averages:
            cooling_load = cdd * beta_cdd
        elif not input_averages and not output_averages:
            cooling_load = cdd * beta_cdd
        else:
            cooling_load = cdd * beta_cdd / days_per_period
    else:
        cooling_load = zeros

    def _restore_nans(load):
        # Any row with a NaN anywhere in the design matrix gets a NaN load.
        load = load[data.sum(axis=1, skipna=False).notnull()].reindex(data.index)
        return load

    base_load = _restore_nans(base_load)
    heating_load = _restore_nans(heating_load)
    cooling_load = _restore_nans(cooling_load)
    if disaggregated:
        return pd.DataFrame(
            {
                "base_load": base_load,
                "heating_load": heating_load,
                "cooling_load": cooling_load,
            }
        )
    else:
        return base_load + heating_load + cooling_load
An internal CalTRACK predict method for use with a design matrix of the form used in model fitting .
11,066
def caltrack_usage_per_day_predict(
    model_type,
    model_params,
    prediction_index,
    temperature_data,
    degree_day_method="daily",
    with_disaggregated=False,
    with_design_matrix=False,
):
    """CalTRACK predict method.

    Builds a degree-day design matrix from temperature data over the
    prediction index and computes predicted usage with
    ``_caltrack_predict_design_matrix``.

    Parameters
    ----------
    model_type : str
        CalTRACK model type (see ``_caltrack_predict_design_matrix``).
    model_params : dict
        Fitted parameters; may include heating/cooling balance points.
    prediction_index : pandas.DatetimeIndex
        Index over which to predict.
    temperature_data : pandas.Series
        Temperature data covering the prediction index.
    degree_day_method : str, optional
        ``'daily'`` or hourly-based degree day computation.
    with_disaggregated : bool, optional
        If True, include base/heating/cooling load columns in the result.
    with_design_matrix : bool, optional
        If True, join the design matrix columns onto the result.

    Returns
    -------
    ModelPrediction
        With ``result`` (DataFrame), ``design_matrix``, and ``warnings``.

    Raises
    ------
    MissingModelParameterError
        If ``model_params`` is None.
    """
    if model_params is None:
        raise MissingModelParameterError("model_params is None.")
    predict_warnings = []
    # Only compute degree days for balance points the model actually uses.
    cooling_balance_points = []
    heating_balance_points = []
    if "cooling_balance_point" in model_params:
        cooling_balance_points.append(model_params["cooling_balance_point"])
    if "heating_balance_point" in model_params:
        heating_balance_points.append(model_params["heating_balance_point"])
    design_matrix = compute_temperature_features(
        prediction_index,
        temperature_data,
        heating_balance_points=heating_balance_points,
        cooling_balance_points=cooling_balance_points,
        degree_day_method=degree_day_method,
        use_mean_daily_values=False,
    )
    if design_matrix.dropna().empty:
        # Temperature feature computation produced nothing usable; return an
        # empty (but correctly shaped) prediction with a warning.
        if with_disaggregated:
            empty_columns = {
                "predicted_usage": [],
                "base_load": [],
                "heating_load": [],
                "cooling_load": [],
            }
        else:
            empty_columns = {"predicted_usage": []}
        predict_warnings.append(
            EEMeterWarning(
                qualified_name=("eemeter.caltrack.compute_temperature_features"),
                description=(
                    "Design matrix empty, compute_temperature_features failed"
                ),
                data={"temperature_data": temperature_data},
            )
        )
        return ModelPrediction(
            pd.DataFrame(empty_columns),
            design_matrix=pd.DataFrame(),
            warnings=predict_warnings,
        )
    # Period lengths include both kept and dropped days/hours so that
    # predictions scale to the full period.
    if degree_day_method == "daily":
        design_matrix["n_days"] = (
            design_matrix.n_days_kept + design_matrix.n_days_dropped
        )
    else:
        design_matrix["n_days"] = (
            design_matrix.n_hours_kept + design_matrix.n_hours_dropped
        ) / 24
    results = _caltrack_predict_design_matrix(
        model_type,
        model_params,
        design_matrix,
        input_averages=False,
        output_averages=False,
    ).to_frame("predicted_usage")
    if with_disaggregated:
        disaggregated = _caltrack_predict_design_matrix(
            model_type,
            model_params,
            design_matrix,
            disaggregated=True,
            input_averages=False,
            output_averages=False,
        )
        results = results.join(disaggregated)
    if with_design_matrix:
        results = results.join(design_matrix)
    return ModelPrediction(
        result=results, design_matrix=design_matrix, warnings=predict_warnings
    )
CalTRACK predict method .
11,067
def get_too_few_non_zero_degree_day_warning(
    model_type, balance_point, degree_day_type, degree_days, minimum_non_zero
):
    """Return an empty list, or a list containing one warning if the count of
    non-zero degree-day values falls below the accepted minimum.
    """
    non_zero_count = int((degree_days > 0).sum())
    if non_zero_count >= minimum_non_zero:
        return []
    qualified_name = (
        "eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format(
            model_type=model_type, degree_day_type=degree_day_type
        )
    )
    description = (
        "Number of non-zero daily {degree_day_type} values below accepted minimum."
        " Candidate fit not attempted.".format(degree_day_type=degree_day_type.upper())
    )
    warning_data = {
        "n_non_zero_{degree_day_type}".format(
            degree_day_type=degree_day_type
        ): non_zero_count,
        "minimum_non_zero_{degree_day_type}".format(
            degree_day_type=degree_day_type
        ): minimum_non_zero,
        "{degree_day_type}_balance_point".format(
            degree_day_type=degree_day_type
        ): balance_point,
    }
    return [
        EEMeterWarning(
            qualified_name=qualified_name, description=description, data=warning_data
        )
    ]
Return an empty list or a single warning wrapped in a list regarding non - zero degree days for a set of degree days .
11,068
def get_total_degree_day_too_low_warning(
    model_type,
    balance_point,
    degree_day_type,
    avg_degree_days,
    period_days,
    minimum_total,
):
    """Return an empty list, or a list containing one warning if the total
    (average degree days times period lengths, summed) falls below the
    accepted minimum.
    """
    total_degree_days = (avg_degree_days * period_days).sum()
    if not total_degree_days < minimum_total:
        return []
    qualified_name = (
        "eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low".format(
            model_type=model_type, degree_day_type=degree_day_type
        )
    )
    description = (
        "Total {degree_day_type} below accepted minimum."
        " Candidate fit not attempted.".format(degree_day_type=degree_day_type.upper())
    )
    warning_data = {
        "total_{degree_day_type}".format(
            degree_day_type=degree_day_type
        ): total_degree_days,
        "total_{degree_day_type}_minimum".format(
            degree_day_type=degree_day_type
        ): minimum_total,
        "{degree_day_type}_balance_point".format(
            degree_day_type=degree_day_type
        ): balance_point,
    }
    return [
        EEMeterWarning(
            qualified_name=qualified_name, description=description, data=warning_data
        )
    ]
Return an empty list or a single warning wrapped in a list regarding the total summed degree day values .
11,069
def get_parameter_negative_warning(model_type, model_params, parameter):
    """Return an empty list, or a list containing one warning if the named
    fitted parameter is negative (a missing parameter is treated as zero).
    """
    if model_params.get(parameter, 0) >= 0:
        return []
    return [
        EEMeterWarning(
            qualified_name=(
                "eemeter.caltrack_daily.{model_type}.{parameter}_negative".format(
                    model_type=model_type, parameter=parameter
                )
            ),
            description=(
                "Model fit {parameter} parameter is negative. Candidate model rejected.".format(
                    parameter=parameter
                )
            ),
            data=model_params,
        )
    ]
Return an empty list or a single warning wrapped in a list indicating whether model parameter is negative .
11,070
def get_parameter_p_value_too_high_warning(
    model_type, model_params, parameter, p_value, maximum_p_value
):
    """Return an empty list, or a list containing one warning if the named
    parameter's p-value exceeds the allowed maximum.
    """
    if not p_value > maximum_p_value:
        return []
    warning_data = {
        "{}_p_value".format(parameter): p_value,
        "{}_maximum_p_value".format(parameter): maximum_p_value,
    }
    warning_data.update(model_params)
    return [
        EEMeterWarning(
            qualified_name=(
                "eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
                    model_type=model_type, parameter=parameter
                )
            ),
            description=(
                "Model fit {parameter} p-value is too high. Candidate model rejected.".format(
                    parameter=parameter
                )
            ),
            data=warning_data,
        )
    ]
Return an empty list or a single warning wrapped in a list indicating whether model parameter p - value is too high .
11,071
def get_fit_failed_candidate_model(model_type, formula):
    """Return a candidate model recording that the fitting routine failed.

    Intended to be called from inside an ``except`` block, so that
    ``traceback.format_exc()`` captures the active exception.
    """
    failure_warning = EEMeterWarning(
        qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
        description=(
            "Error encountered in statsmodels.formula.api.ols method. (Empty data?)"
        ),
        data={"traceback": traceback.format_exc()},
    )
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status="ERROR",
        warnings=[failure_warning],
    )
Return a Candidate model that indicates the fitting routine failed .
11,072
def get_intercept_only_candidate_models(data, weights_col):
    """Return a list of a single candidate intercept-only model.

    Parameters
    ----------
    data : pandas.DataFrame
        Fitting data with a ``meter_value`` column.
    weights_col : str or None
        Column of WLS weights, or None for unweighted (all weights 1).

    Returns
    -------
    list of CalTRACKUsagePerDayCandidateModel
        Single-element list containing the fitted (or failed) candidate.
    """
    model_type = "intercept_only"
    formula = "meter_value ~ 1"
    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return [get_fit_failed_candidate_model(model_type, formula)]
    result = model.fit()
    model_params = {"intercept": result.params["Intercept"]}
    model_warnings = []
    # A negative intercept is physically implausible; disqualify if found.
    for parameter in ["intercept"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"
    return [
        CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status=status,
            warnings=model_warnings,
            model_params=model_params,
            model=model,
            result=result,
            # An intercept-only model explains no variance by construction.
            r_squared_adj=0,
        )
    ]
Return a list of a single candidate intercept - only model .
11,073
def get_single_cdd_only_candidate_model(
    data,
    minimum_non_zero_cdd,
    minimum_total_cdd,
    beta_cdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """Return a single candidate cdd-only model for a particular balance point.

    Parameters
    ----------
    data : pandas.DataFrame
        Fitting data with ``meter_value`` and a ``cdd_<balance_point>`` column.
    minimum_non_zero_cdd : int
        Minimum count of non-zero CDD values required to attempt a fit.
    minimum_total_cdd : float
        Minimum total CDD required to attempt a fit.
    beta_cdd_maximum_p_value : float
        Maximum allowed p-value for the CDD coefficient.
    weights_col : str or None
        Column of WLS weights, or None for unweighted.
    balance_point : int or float
        Cooling balance point identifying the CDD column.

    Returns
    -------
    CalTRACKUsagePerDayCandidateModel
    """
    model_type = "cdd_only"
    cdd_column = "cdd_%s" % balance_point
    formula = "meter_value ~ %s" % cdd_column
    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    # NOTE(review): when weights_col is None, period_days is the scalar 1, so
    # the total-CDD check sums the CDD column directly — presumably correct
    # for daily data where each period is one day; confirm for other uses.
    period_days = weights
    # Data-sufficiency checks; if any fail, skip fitting entirely.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "cdd",
            data[cdd_column],
            period_days,
            minimum_total_cdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "cdd", data[cdd_column], minimum_non_zero_cdd
        )
    )
    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )
    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return get_fit_failed_candidate_model(model_type, formula)
    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_cdd_p_value = result.pvalues[cdd_column]
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_cdd": result.params[cdd_column],
        "cooling_balance_point": balance_point,
    }
    model_warnings = []
    # Disqualify on negative parameters or a too-high CDD p-value.
    for parameter in ["intercept", "beta_cdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_cdd_p_value,
                beta_cdd_maximum_p_value,
            )
        )
    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
Return a single candidate cdd - only model for a particular balance point .
11,074
def get_cdd_only_candidate_models(
    data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
    """Return a list of all possible candidate cdd-only models, one per
    ``cdd_<balance_point>`` column found in the data.
    """
    candidate_models = []
    for column in data.columns:
        if not column.startswith("cdd"):
            continue
        balance_point = int(column[4:])
        candidate_models.append(
            get_single_cdd_only_candidate_model(
                data,
                minimum_non_zero_cdd,
                minimum_total_cdd,
                beta_cdd_maximum_p_value,
                weights_col,
                balance_point,
            )
        )
    return candidate_models
Return a list of all possible candidate cdd - only models .
11,075
def get_single_hdd_only_candidate_model(
    data,
    minimum_non_zero_hdd,
    minimum_total_hdd,
    beta_hdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """Return a single candidate hdd-only model for a particular balance point.

    Parameters
    ----------
    data : pandas.DataFrame
        Fitting data with ``meter_value`` and an ``hdd_<balance_point>`` column.
    minimum_non_zero_hdd : int
        Minimum count of non-zero HDD values required to attempt a fit.
    minimum_total_hdd : float
        Minimum total HDD required to attempt a fit.
    beta_hdd_maximum_p_value : float
        Maximum allowed p-value for the HDD coefficient.
    weights_col : str or None
        Column of WLS weights, or None for unweighted.
    balance_point : int or float
        Heating balance point identifying the HDD column.

    Returns
    -------
    CalTRACKUsagePerDayCandidateModel
    """
    model_type = "hdd_only"
    hdd_column = "hdd_%s" % balance_point
    formula = "meter_value ~ %s" % hdd_column
    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    # NOTE(review): when weights_col is None, period_days is the scalar 1, so
    # the total-HDD check sums the HDD column directly — presumably correct
    # for daily data where each period is one day; confirm for other uses.
    period_days = weights
    # Data-sufficiency checks; if any fail, skip fitting entirely.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "hdd", data[hdd_column], minimum_non_zero_hdd
        )
    )
    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )
    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return get_fit_failed_candidate_model(model_type, formula)
    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_hdd_p_value = result.pvalues[hdd_column]
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_hdd": result.params[hdd_column],
        "heating_balance_point": balance_point,
    }
    model_warnings = []
    # Disqualify on negative parameters or a too-high HDD p-value.
    for parameter in ["intercept", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )
    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
Return a single candidate hdd - only model for a particular balance point .
11,076
def get_single_cdd_hdd_candidate_model(
    data,
    minimum_non_zero_cdd,
    minimum_non_zero_hdd,
    minimum_total_cdd,
    minimum_total_hdd,
    beta_cdd_maximum_p_value,
    beta_hdd_maximum_p_value,
    weights_col,
    cooling_balance_point,
    heating_balance_point,
):
    """Return and fit a single candidate cdd_hdd model for a particular
    selection of cooling balance point and heating balance point.

    Parameters
    ----------
    data : pandas.DataFrame
        Fitting data with ``meter_value``, ``cdd_<cooling_balance_point>``,
        and ``hdd_<heating_balance_point>`` columns.
    minimum_non_zero_cdd, minimum_non_zero_hdd : int
        Minimum counts of non-zero degree-day values required to attempt a fit.
    minimum_total_cdd, minimum_total_hdd : float
        Minimum total degree days required to attempt a fit.
    beta_cdd_maximum_p_value, beta_hdd_maximum_p_value : float
        Maximum allowed p-values for the degree-day coefficients.
    weights_col : str or None
        Column of WLS weights, or None for unweighted.
    cooling_balance_point, heating_balance_point : int or float
        Balance points identifying the CDD and HDD columns.

    Returns
    -------
    CalTRACKUsagePerDayCandidateModel
    """
    model_type = "cdd_hdd"
    cdd_column = "cdd_%s" % cooling_balance_point
    hdd_column = "hdd_%s" % heating_balance_point
    formula = "meter_value ~ %s + %s" % (cdd_column, hdd_column)
    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]
    # NOTE(review): when weights_col is None, period_days is the scalar 1, so
    # the total degree-day checks sum the columns directly — presumably
    # correct for daily data; confirm for other uses.
    period_days = weights
    # Data-sufficiency checks for both CDD and HDD; if any fail, skip fitting.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            cooling_balance_point,
            "cdd",
            data[cdd_column],
            period_days,
            minimum_total_cdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type,
            cooling_balance_point,
            "cdd",
            data[cdd_column],
            minimum_non_zero_cdd,
        )
    )
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            heating_balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type,
            heating_balance_point,
            "hdd",
            data[hdd_column],
            minimum_non_zero_hdd,
        )
    )
    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )
    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception:  # model construction can fail on empty/degenerate data
        return get_fit_failed_candidate_model(model_type, formula)
    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_cdd_p_value = result.pvalues[cdd_column]
    beta_hdd_p_value = result.pvalues[hdd_column]
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_cdd": result.params[cdd_column],
        "beta_hdd": result.params[hdd_column],
        "cooling_balance_point": cooling_balance_point,
        "heating_balance_point": heating_balance_point,
    }
    model_warnings = []
    # NOTE(review): both degree-day p-value checks are applied to every
    # parameter (including the intercept) — this matches the original code
    # but looks stricter than intended; confirm before changing.
    for parameter in ["intercept", "beta_cdd", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_cdd_p_value,
                beta_cdd_maximum_p_value,
            )
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )
    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
Return and fit a single candidate cdd_hdd model for a particular selection of cooling balance point and heating balance point.
11,077
def get_cdd_hdd_candidate_models(
    data,
    minimum_non_zero_cdd,
    minimum_non_zero_hdd,
    minimum_total_cdd,
    minimum_total_hdd,
    beta_cdd_maximum_p_value,
    beta_hdd_maximum_p_value,
    weights_col,
):
    """Return a list of candidate cdd_hdd models, one for each pairing of a
    cooling balance point with a heating balance point at or below it.
    """
    cooling_balance_points = []
    heating_balance_points = []
    for column in data.columns:
        if column.startswith("cdd"):
            cooling_balance_points.append(int(column[4:]))
        elif column.startswith("hdd"):
            heating_balance_points.append(int(column[4:]))
    candidate_models = []
    for cooling_balance_point in cooling_balance_points:
        for heating_balance_point in heating_balance_points:
            # Heating balance point must not exceed the cooling one.
            if heating_balance_point > cooling_balance_point:
                continue
            candidate_models.append(
                get_single_cdd_hdd_candidate_model(
                    data,
                    minimum_non_zero_cdd,
                    minimum_non_zero_hdd,
                    minimum_total_cdd,
                    minimum_total_hdd,
                    beta_cdd_maximum_p_value,
                    beta_hdd_maximum_p_value,
                    weights_col,
                    cooling_balance_point,
                    heating_balance_point,
                )
            )
    return candidate_models
Return a list of candidate cdd_hdd models for a particular selection of cooling balance point and heating balance point.
11,078
def select_best_candidate(candidate_models):
    """Select and return the best candidate model, judged by adjusted
    r-squared among QUALIFIED candidates.

    Returns
    -------
    tuple
        ``(best_candidate, warnings)``; ``best_candidate`` is None (with a
        warning) if no candidate qualifies.
    """
    best = None
    best_score = -np.inf
    for model in candidate_models:
        if model.status != "QUALIFIED":
            continue
        if model.r_squared_adj > best_score:
            best_score = model.r_squared_adj
            best = model
    if best is not None:
        return best, []
    status_counts = Counter(c.status for c in candidate_models)
    warnings = [
        EEMeterWarning(
            qualified_name="eemeter.caltrack_daily.select_best_candidate.no_candidates",
            description="No qualified model candidates available.",
            data={
                "status_count:{}".format(status): count
                for status, count in status_counts.items()
            },
        )
    ]
    return None, warnings
Select and return the best candidate model based on r - squared and qualification .
11,079
def fit_caltrack_usage_per_day_model(
    data,
    fit_cdd=True,
    use_billing_presets=False,
    minimum_non_zero_cdd=10,
    minimum_non_zero_hdd=10,
    minimum_total_cdd=20,
    minimum_total_hdd=20,
    beta_cdd_maximum_p_value=1,
    beta_hdd_maximum_p_value=1,
    weights_col=None,
    fit_intercept_only=True,
    fit_cdd_only=True,
    fit_hdd_only=True,
    fit_cdd_hdd=True,
):
    """CalTRACK daily and billing methods using a usage-per-day modeling
    strategy.

    Fits all requested candidate model types (intercept-only, HDD-only,
    CDD-only, CDD+HDD), selects the best qualified candidate by adjusted
    r-squared, and returns the results with fit metrics attached.

    Parameters
    ----------
    data : pandas.DataFrame
        Design matrix with a ``meter_value`` column and degree-day columns
        (e.g., ``hdd_60``, ``cdd_65``).
    fit_cdd : bool, optional
        Whether to fit candidates that include a CDD term.
    use_billing_presets : bool, optional
        Use billing-period data-sufficiency presets; requires ``weights_col``.
    minimum_non_zero_cdd, minimum_non_zero_hdd : int, optional
        Minimum counts of non-zero degree-day values to attempt a fit.
    minimum_total_cdd, minimum_total_hdd : float, optional
        Minimum total degree days to attempt a fit.
    beta_cdd_maximum_p_value, beta_hdd_maximum_p_value : float, optional
        Maximum allowed p-values for the degree-day coefficients.
    weights_col : str or None, optional
        Column of weights for weighted least squares.
    fit_intercept_only, fit_cdd_only, fit_hdd_only, fit_cdd_hdd : bool, optional
        Toggles for each candidate model type.

    Returns
    -------
    CalTRACKUsagePerDayModelResults

    Raises
    ------
    ValueError
        If ``use_billing_presets`` is set without ``weights_col``.
    """
    if use_billing_presets:
        # Billing-period presets; weights are required so that periods of
        # different lengths are weighted appropriately.
        minimum_non_zero_cdd = 0
        minimum_non_zero_hdd = 0
        minimum_total_cdd = 20
        minimum_total_hdd = 20
        if weights_col is None:
            raise ValueError(
                "If using billing presets, the weights_col argument must be specified."
            )
        interval = "billing"
    else:
        interval = "daily"
    # Treat any row with a NaN in any column as fully missing.
    data = overwrite_partial_rows_with_nan(data)
    if data.dropna().empty:
        return CalTRACKUsagePerDayModelResults(
            status="NO DATA",
            method_name="caltrack_usage_per_day",
            warnings=[
                EEMeterWarning(
                    qualified_name="eemeter.caltrack_usage_per_day.no_data",
                    description=("No data available. Cannot fit model."),
                    data={},
                )
            ],
        )
    # Assemble the pool of candidate models to compare.
    candidates = []
    if fit_intercept_only:
        candidates.extend(
            get_intercept_only_candidate_models(data, weights_col=weights_col)
        )
    if fit_hdd_only:
        candidates.extend(
            get_hdd_only_candidate_models(
                data=data,
                minimum_non_zero_hdd=minimum_non_zero_hdd,
                minimum_total_hdd=minimum_total_hdd,
                beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
                weights_col=weights_col,
            )
        )
    if fit_cdd:
        if fit_cdd_only:
            candidates.extend(
                get_cdd_only_candidate_models(
                    data=data,
                    minimum_non_zero_cdd=minimum_non_zero_cdd,
                    minimum_total_cdd=minimum_total_cdd,
                    beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
                    weights_col=weights_col,
                )
            )
        if fit_cdd_hdd:
            candidates.extend(
                get_cdd_hdd_candidate_models(
                    data=data,
                    minimum_non_zero_cdd=minimum_non_zero_cdd,
                    minimum_non_zero_hdd=minimum_non_zero_hdd,
                    minimum_total_cdd=minimum_total_cdd,
                    minimum_total_hdd=minimum_total_hdd,
                    beta_cdd_maximum_p_value=beta_cdd_maximum_p_value,
                    beta_hdd_maximum_p_value=beta_hdd_maximum_p_value,
                    weights_col=weights_col,
                )
            )
    # Pick the qualified candidate with the highest adjusted r-squared.
    best_candidate, candidate_warnings = select_best_candidate(candidates)
    warnings = candidate_warnings
    if best_candidate is None:
        status = "NO MODEL"
        r_squared_adj = None
    else:
        status = "SUCCESS"
        r_squared_adj = best_candidate.r_squared_adj
    model_result = CalTRACKUsagePerDayModelResults(
        status=status,
        method_name="caltrack_usage_per_day",
        interval=interval,
        model=best_candidate,
        candidates=candidates,
        r_squared_adj=r_squared_adj,
        warnings=warnings,
        settings={
            "fit_cdd": fit_cdd,
            "minimum_non_zero_cdd": minimum_non_zero_cdd,
            "minimum_non_zero_hdd": minimum_non_zero_hdd,
            "minimum_total_cdd": minimum_total_cdd,
            "minimum_total_hdd": minimum_total_hdd,
            "beta_cdd_maximum_p_value": beta_cdd_maximum_p_value,
            "beta_hdd_maximum_p_value": beta_hdd_maximum_p_value,
        },
    )
    if best_candidate is not None:
        # Count of slope parameters, used for degrees-of-freedom adjustment
        # in the fit metrics.
        if best_candidate.model_type in ["cdd_hdd"]:
            num_parameters = 2
        elif best_candidate.model_type in ["hdd_only", "cdd_only"]:
            num_parameters = 1
        else:
            num_parameters = 0
        # Metrics on average usage per day.
        predicted_avgs = _caltrack_predict_design_matrix(
            best_candidate.model_type,
            best_candidate.model_params,
            data,
            input_averages=True,
            output_averages=True,
        )
        model_result.avgs_metrics = ModelMetrics(
            data.meter_value, predicted_avgs, num_parameters
        )
        # Metrics on total usage per period.
        predicted_totals = _caltrack_predict_design_matrix(
            best_candidate.model_type,
            best_candidate.model_params,
            data,
            input_averages=True,
            output_averages=False,
        )
        days_per_period = day_counts(data.index)
        data_totals = data.meter_value * days_per_period
        model_result.totals_metrics = ModelMetrics(
            data_totals, predicted_totals, num_parameters
        )
    return model_result
CalTRACK daily and billing methods using a usage - per - day modeling strategy .
11,080
def plot_caltrack_candidate(
    candidate,
    best=False,
    ax=None,
    title=None,
    figsize=None,
    temp_range=None,
    alpha=None,
    **kwargs
):
    """Plot a CalTRACK candidate model's predicted usage over a temperature
    range.

    Parameters
    ----------
    candidate : object
        Candidate model with ``status`` and a ``predict`` method.
    best : bool, optional
        Highlight this candidate as the selected (best) model.
    ax : matplotlib.axes.Axes, optional
        Existing axes to draw on; a new figure is created if None.
    title : str, optional
        Chart title.
    figsize : tuple, optional
        Size of a newly created figure; defaults to (10, 4).
    temp_range : tuple, optional
        (min, max) temperature range; defaults to (30, 90).
    alpha : float, optional
        Line transparency; defaults to 0.3 unless ``best``.
    **kwargs
        Additional keyword arguments forwarded to ``ax.plot``.

    Returns
    -------
    matplotlib.axes.Axes or None
        None if the candidate is neither QUALIFIED nor DISQUALIFIED.

    Raises
    ------
    ImportError
        If matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("matplotlib is required for plotting.")
    if figsize is None:
        figsize = (10, 4)
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    # Color by qualification status; skip anything else (e.g. errored fits).
    if candidate.status == "QUALIFIED":
        color = "C2"
    elif candidate.status == "DISQUALIFIED":
        color = "C3"
    else:
        return
    if best:
        color = "C1"
        alpha = 1
    temp_min, temp_max = (30, 90) if temp_range is None else temp_range
    temps = np.arange(temp_min, temp_max)
    # Build a synthetic daily index and hourly temperatures over the range
    # so that predict() produces one usage value per temperature.
    prediction_index = pd.date_range(
        "2017-01-01T00:00:00Z", periods=len(temps), freq="D"
    )
    temps_hourly = pd.Series(temps, index=prediction_index).resample("H").ffill()
    prediction = candidate.predict(
        prediction_index, temps_hourly, "daily"
    ).result.predicted_usage
    plot_kwargs = {"color": color, "alpha": alpha or 0.3}
    plot_kwargs.update(kwargs)
    ax.plot(temps, prediction, **plot_kwargs)
    if title is not None:
        ax.set_title(title)
    return ax
Plot a CalTRACK candidate model .
11,081
def plot(
    self,
    ax=None,
    title=None,
    figsize=None,
    with_candidates=False,
    candidate_alpha=None,
    temp_range=None,
):
    """Plot a model fit.

    Parameters
    ----------
    ax : matplotlib.axes.Axes, optional
        Existing axes to draw on; a new figure is created if None.
    title : str, optional
        Chart title.
    figsize : tuple, optional
        Size of a newly created figure; defaults to (10, 4).
    with_candidates : bool, optional
        If True, also plot every candidate model (from ``self.candidates``).
    candidate_alpha : float, optional
        Transparency for candidate traces.
    temp_range : tuple, optional
        (min, max) temperature range; defaults to (20, 90).

    Returns
    -------
    matplotlib.axes.Axes

    Raises
    ------
    ImportError
        If matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("matplotlib is required for plotting.")
    if figsize is None:
        figsize = (10, 4)
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    if temp_range is None:
        temp_range = (20, 90)
    if with_candidates:
        for candidate in self.candidates:
            candidate.plot(ax=ax, temp_range=temp_range, alpha=candidate_alpha)
    # Plot the selected model on top, highlighted.
    self.model.plot(ax=ax, best=True, temp_range=temp_range)
    if title is not None:
        ax.set_title(title)
    return ax
Plot a model fit .
11,082
def plot_time_series(meter_data, temperature_data, **kwargs):
    """Plot meter and temperature data as a dual-axes time series.

    Parameters
    ----------
    meter_data : pandas.DataFrame
        Meter data with a ``value`` column, plotted on the left axis.
    temperature_data : pandas.Series
        Temperature data, plotted on the right axis.
    **kwargs
        Additional keyword arguments forwarded to ``plt.subplots``.

    Returns
    -------
    tuple
        ``(energy_axis, temperature_axis)``.

    Raises
    ------
    ImportError
        If matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("matplotlib is required for plotting.")
    subplot_kwargs = {"figsize": (16, 4)}
    subplot_kwargs.update(kwargs)
    fig, usage_ax = plt.subplots(**subplot_kwargs)
    usage_ax.plot(
        meter_data.index,
        meter_data.value,
        color="C0",
        label="Energy Use",
        drawstyle="steps-post",
    )
    usage_ax.set_ylabel("Energy Use")
    temp_ax = usage_ax.twinx()
    temp_ax.plot(
        temperature_data.index,
        temperature_data,
        color="C1",
        label="Temperature",
        alpha=0.8,
    )
    temp_ax.set_ylabel("Temperature")
    fig.legend()
    return usage_ax, temp_ax
Plot meter and temperature data in dual - axes time series .
11,083
def plot_energy_signature(
    meter_data, temperature_data, temp_col=None, ax=None, title=None, figsize=None, **kwargs
):
    """Plot meter and temperature data as an energy signature (scatter of
    usage per day against mean temperature).

    Parameters
    ----------
    meter_data : pandas.DataFrame
        Meter data used to compute usage per day.
    temperature_data : pandas.Series
        Temperature data used to compute period mean temperatures.
    temp_col : str, optional
        Temperature column to plot against; defaults to
        ``'temperature_mean'``.
    ax : matplotlib.axes.Axes, optional
        Existing axes to draw on; a new figure is created if None.
    title : str, optional
        Chart title.
    figsize : tuple, optional
        Size of a newly created figure; defaults to (10, 4).
    **kwargs
        Additional keyword arguments forwarded to ``ax.scatter``.

    Returns
    -------
    matplotlib.axes.Axes

    Raises
    ------
    ImportError
        If matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("matplotlib is required for plotting.")
    # Align usage-per-day values with mean temperatures on a common index.
    temperature_mean = compute_temperature_features(meter_data.index, temperature_data)
    usage_per_day = compute_usage_per_day_feature(meter_data, series_name="meter_value")
    df = merge_features([usage_per_day, temperature_mean.temperature_mean])
    if figsize is None:
        figsize = (10, 4)
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    if temp_col is None:
        temp_col = "temperature_mean"
    ax.scatter(df[temp_col], df.meter_value, **kwargs)
    ax.set_xlabel("Temperature")
    ax.set_ylabel("Energy Use per Day")
    if title is not None:
        ax.set_title(title)
    return ax
Plot meter and temperature data in energy signature .
11,084
def meter_data_from_csv(
    filepath_or_buffer,
    tz=None,
    start_col="start",
    value_col="value",
    gzipped=False,
    freq=None,
    **kwargs
):
    """Load a meter-data CSV into a UTC-indexed DataFrame.

    The index is localized to UTC and then converted to ``tz`` when one is
    given. ``freq`` may be ``"hourly"`` or ``"daily"`` to resample the
    values by summation; any other value leaves the data untouched.
    Remaining keyword arguments are forwarded to ``pandas.read_csv``.
    """
    read_csv_kwargs = dict(
        usecols=[start_col, value_col],
        dtype={value_col: np.float64},
        parse_dates=[start_col],
        index_col=start_col,
    )
    if gzipped:
        read_csv_kwargs["compression"] = "gzip"
    # Caller-supplied read_csv options override the defaults above.
    read_csv_kwargs.update(kwargs)

    df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize("UTC")
    if tz is not None:
        df = df.tz_convert(tz)

    # Map the friendly frequency names onto pandas resample rules.
    rule = {"hourly": "H", "daily": "D"}.get(freq)
    if rule is not None:
        df = df.resample(rule).sum()
    return df
Load meter data from a CSV file.
11,085
def temperature_data_from_csv(
    filepath_or_buffer,
    tz=None,
    date_col="dt",
    temp_col="tempF",
    gzipped=False,
    freq=None,
    **kwargs
):
    """Load a temperature CSV into a tz-aware Series.

    The index is localized to ``tz`` (UTC when not given). When ``freq``
    is ``"hourly"`` the data is resampled to hours. Extra keyword
    arguments are forwarded to ``pandas.read_csv``.
    """
    read_csv_kwargs = dict(
        usecols=[date_col, temp_col],
        dtype={temp_col: np.float64},
        parse_dates=[date_col],
        index_col=date_col,
    )
    if gzipped:
        read_csv_kwargs["compression"] = "gzip"
    # Caller-supplied read_csv options override the defaults above.
    read_csv_kwargs.update(kwargs)

    timezone = "UTC" if tz is None else tz
    df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize(timezone)
    if freq == "hourly":
        # NOTE(review): hourly resampling sums temperatures, matching the
        # original behavior; a mean might be expected — confirm upstream.
        df = df.resample("H").sum()
    return df[temp_col]
Load temperature data from a CSV file.
11,086
def meter_data_from_json(data, orient="list"):
    """Build a UTC-indexed meter-data DataFrame from parsed JSON.

    Only the ``"list"`` orientation — an iterable of ``[start, value]``
    pairs — is supported; any other ``orient`` raises ``ValueError``.
    """
    # Guard clause: reject unknown orientations up front.
    if orient != "list":
        raise ValueError("orientation not recognized.")
    df = pd.DataFrame(data, columns=["start", "value"])
    df["start"] = pd.DatetimeIndex(df.start).tz_localize("UTC")
    return df.set_index("start")
Load meter data from JSON.
11,087
def notify(self):
    """Notify the client of a change on this component's channel."""
    event_name = _NAME + str(self._uuid)
    if flask.has_request_context():
        # Inside a request we can emit directly on the active socket.
        emit(event_name)
    else:
        # Outside a request context, emit via the app's Socket.IO
        # extension and yield so the greenlet can flush the message.
        sio = flask.current_app.extensions['socketio']
        sio.emit(event_name)
        eventlet.sleep()
Notify the client.
11,088
def validate(key):
    """Ensure ``key`` is a ``str`` or ``bytes``.

    Raises ``KeyError`` otherwise — callers catch ``KeyError``, so the
    exception type is preserved even though the check is about type.
    """
    if isinstance(key, (str, bytes)):
        return
    raise KeyError(
        'Key must be of type str or bytes, found type {}'.format(type(key))
    )
Check that the key is a string or bytestring.
11,089
def pack(x: Any) -> bytes:
    """Serialize ``x`` to msgpack bytes using the project's extra encoders.

    Raises ``SerializationError`` with a trimmed explanation when the
    object cannot be serialized.
    """
    try:
        return msgpack.packb(x, default=encoders)
    except TypeError as exc:
        # Drop the first 16 characters of msgpack's error text (presumably
        # a fixed prefix) before appending it — preserves original slicing.
        detail = str(exc)[16:]
        raise SerializationError(
            'Serialization error, check the data passed to a do_ command. '
            'Cannot serialize this object:\n' + detail
        )
Encode x into msgpack with additional encoders.
11,090
def make_event(event: Callable) -> Callable:
    """Create an ``Event``-producing property from a method signature.

    The method's name minus its first three characters (presumably an
    ``on_`` prefix — confirm at the call sites) becomes the event name.
    """
    @property
    @wraps(event)
    def event_property(self):
        event_name = event.__name__[3:]  # strip the 3-char name prefix
        try:
            getter_name = event(self).__name__
        except AttributeError:
            # The method returned nothing nameable: event has no getter.
            getter_name = None
        return Event(event_name, self._uuid, getter_name)
    return event_property
Create an event from a method signature.
11,091
def _insert(wrap: str, tag: Optional[str]) -> str:
    """Substitute ``tag`` for the ``component`` placeholder in ``wrap``.

    Raises ``ValueError`` when no tag is supplied.
    """
    if tag is None:
        raise ValueError('tag cannot be None')
    # Only the ``component`` field is supplied; FormatDict presumably
    # tolerates any other placeholders in the wrapper — confirm its impl.
    return string.Formatter().vformat(wrap, (), FormatDict(component=tag))
Insert the component tag into the wrapper HTML.
11,092
def do_options(self, labels, values):
    """Replace the dropdown options.

    Pairs each label with its value; extra items in the longer sequence
    are ignored (``zip`` semantics).
    """
    options = []
    for label, value in zip(labels, values):
        options.append({'label': label, 'value': value})
    return options
Replace the drop-down fields.
11,093
def do_options(self, labels: Sequence[str], values: Sequence[Union[str, int]]) -> Sequence[Dict]:
    """Replace the checkbox options with label/value pairs.

    Extra items in the longer sequence are ignored (``zip`` semantics).
    """
    return [dict(label=lab, value=val) for lab, val in zip(labels, values)]
Replace the checkbox options.
11,094
def do_options(self, labels, values):
    """Replace the radio-button options with label/value pairs.

    Extra items in the longer sequence are ignored (``zip`` semantics).
    """
    return [
        dict(zip(('label', 'value'), pair))
        for pair in zip(labels, values)
    ]
Replace the radio button options.
11,095
def node_version():
    """Return the installed Node.js version as a tuple of ints.

    Runs ``node --version`` and parses output such as ``v10.15.1``.
    """
    raw = check_output(('node', '--version')).strip()
    # Skip the leading b"v", then split the dotted version number.
    return tuple(int(part) for part in raw[1:].split(b'.'))
Get the Node.js version.
11,096
def run(self):
    """Run the wrapped function once, then reschedule after the delay.

    The function runs in its own greenlet; any exception it raises is
    printed rather than propagated so the timer keeps firing.
    """
    greenlet = eventlet.spawn(self.context(self.func))
    eventlet.sleep(self.seconds)
    try:
        greenlet.wait()
    except Exception:
        traceback.print_exc()
    # Chain the next invocation by respawning ourselves.
    self.thread = eventlet.spawn(self.run)
Invoke the function repeatedly on a timer.
11,097
def overlap(self, other: 'Span'):
    """Return True when this span and ``other`` share at least one cell.

    Ranges are half-open, so spans that merely touch do not overlap.
    """
    # Two spans intersect iff their intervals cross on both axes.
    columns_cross = (other.column_end > self.column_start
                     and self.column_end > other.column_start)
    rows_cross = (other.row_end > self.row_start
                  and self.row_end > other.row_start)
    return columns_cross and rows_cross
Detect whether two spans overlap.
11,098
def cells(self) -> Generator[Tuple[int, int], None, None]:
    """Yield every (row, column) pair covered by this span.

    Iterates rows in the outer loop and columns in the inner loop, i.e.
    row-major order.
    """
    for row in range(self.row_start, self.row_end):
        for column in range(self.column_start, self.column_end):
            yield row, column
Generate the cells in the span.
11,099
def pixels(self, value: float) -> 'Size':
    """Set the maximum size in pixels.

    Returns this ``Size`` instance so calls can be chained.
    """
    # Validate before mutating any state.
    raise_not_number(value)
    size_text = '{}px'.format(value)
    self.maximum = size_text
    return self
Set the size in pixels.