idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
18,000
def pool_versions(self, updater_id=None, updater_name=None, pool_id=None):
    """Get a list of pool versions.

    Parameters:
        updater_id: Filter by the id of the user who updated the pool.
        updater_name: Filter by the name of the updating user.
        pool_id: Filter by pool id.
    """
    search = {
        'search[updater_id]': updater_id,
        'search[updater_name]': updater_name,
        'search[pool_id]': pool_id,
        }
    return self._get('pool_versions.json', search)
Get list of pool versions .
18,001
def tag_aliases ( self , name_matches = None , antecedent_name = None , tag_id = None ) : params = { 'search[name_matches]' : name_matches , 'search[antecedent_name]' : antecedent_name , 'search[id]' : tag_id } return self . _get ( 'tag_aliases.json' , params )
Get tags aliases .
18,002
def tag_implications ( self , name_matches = None , antecedent_name = None , tag_id = None ) : params = { 'search[name_matches]' : name_matches , 'search[antecedent_name]' : antecedent_name , 'search[id]' : tag_id } return self . _get ( 'tag_implications.json' , params )
Get tags implications .
18,003
def tag_related ( self , query , category = None ) : params = { 'query' : query , 'category' : category } return self . _get ( 'related_tag.json' , params )
Get related tags .
18,004
def wiki_list ( self , title = None , creator_id = None , body_matches = None , other_names_match = None , creator_name = None , hide_deleted = None , other_names_present = None , order = None ) : params = { 'search[title]' : title , 'search[creator_id]' : creator_id , 'search[body_matches]' : body_matches , 'search[other_names_match]' : other_names_match , 'search[creator_name]' : creator_name , 'search[hide_deleted]' : hide_deleted , 'search[other_names_present]' : other_names_present , 'search[order]' : order } return self . _get ( 'wiki_pages.json' , params )
Function to retrieve a list of every wiki page .
18,005
def wiki_versions_list(self, page_id, updater_id):
    """Return a list of wiki page versions.

    Parameters:
        page_id: The id of the wiki page to list versions for.
        updater_id: Filter by the id of the updating user.
    """
    # BUG FIX: the key was misspelled 'earch[updater_id]', so the
    # updater filter was silently ignored by the API.
    params = {
        'search[updater_id]': updater_id,
        'search[wiki_page_id]': page_id,
        }
    return self._get('wiki_page_versions.json', params)
Return a list of wiki page versions .
18,006
def forum_topic_list ( self , title_matches = None , title = None , category_id = None ) : params = { 'search[title_matches]' : title_matches , 'search[title]' : title , 'search[category_id]' : category_id } return self . _get ( 'forum_topics.json' , params )
Function to get forum topics .
18,007
def forum_post_list ( self , creator_id = None , creator_name = None , topic_id = None , topic_title_matches = None , topic_category_id = None , body_matches = None ) : params = { 'search[creator_id]' : creator_id , 'search[creator_name]' : creator_name , 'search[topic_id]' : topic_id , 'search[topic_title_matches]' : topic_title_matches , 'search[topic_category_id]' : topic_category_id , 'search[body_matches]' : body_matches } return self . _get ( 'forum_posts.json' , params )
Return a list of forum posts .
18,008
def site_name ( self , site_name ) : if site_name in SITE_LIST : self . __site_name = site_name self . __site_url = SITE_LIST [ site_name ] [ 'url' ] else : raise PybooruError ( "The 'site_name' is not valid, specify a valid 'site_name'." )
Function that sets and checks the site name and set url .
18,009
def site_url ( self , url ) : regex = re . compile ( r'^(?:http|https)://' r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \ [A-Z0-9-]{2,}(?<!-)\.?)|' r'localhost|' r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' r'(?::\d+)?' r'(?:/?|[/?]\S+)$' , re . IGNORECASE ) if re . match ( '^(?:http|https)://' , url ) : if re . search ( regex , url ) : self . __site_url = url else : raise PybooruError ( "Invalid URL: {0}" . format ( url ) ) else : raise PybooruError ( "Invalid URL scheme, use HTTP or HTTPS: {0}" . format ( url ) )
URL setter and validator for site_url property .
18,010
def _request ( self , url , api_call , request_args , method = 'GET' ) : try : if method != 'GET' : self . client . headers . update ( { 'content-type' : None } ) response = self . client . request ( method , url , ** request_args ) self . last_call . update ( { 'API' : api_call , 'url' : response . url , 'status_code' : response . status_code , 'status' : self . _get_status ( response . status_code ) , 'headers' : response . headers } ) if response . status_code in ( 200 , 201 , 202 , 204 ) : return response . json ( ) raise PybooruHTTPError ( "In _request" , response . status_code , response . url ) except requests . exceptions . Timeout : raise PybooruError ( "Timeout! url: {0}" . format ( response . url ) ) except ValueError as e : raise PybooruError ( "JSON Error: {0} in line {1} column {2}" . format ( e . msg , e . lineno , e . colno ) )
Function to request and return JSON data .
18,011
def post_update ( self , post_id , tags = None , file_ = None , rating = None , source = None , is_rating_locked = None , is_note_locked = None , parent_id = None ) : params = { 'id' : post_id , 'post[tags]' : tags , 'post[rating]' : rating , 'post[source]' : source , 'post[is_rating_locked]' : is_rating_locked , 'post[is_note_locked]' : is_note_locked , 'post[parent_id]' : parent_id } if file_ is not None : file_ = { 'post[file]' : open ( file_ , 'rb' ) } return self . _get ( 'post/update' , params , 'PUT' , file_ ) else : return self . _get ( 'post/update' , params , 'PUT' )
Update a specific post .
18,012
def site_name(self, site_name):
    """Set ``api_version`` and ``hash_string`` for the given site.

    Delegates validation to the base-class setter, then copies the
    site-specific metadata when the site's entry provides it.
    """
    _Pybooru.site_name.fset(self, site_name)
    # BUG FIX: `('api_version' and 'hashed_string') in d` only tested
    # 'hashed_string' (`'a' and 'b'` evaluates to 'b'); both keys must be
    # present before reading them, otherwise a KeyError could follow.
    site = SITE_LIST[site_name]
    if 'api_version' in site and 'hashed_string' in site:
        self.api_version = site['api_version']
        self.hash_string = site['hashed_string']
Sets api_version and hash_string .
18,013
def _build_url ( self , api_call ) : if self . api_version in ( '1.13.0' , '1.13.0+update.1' , '1.13.0+update.2' ) : if '/' not in api_call : return "{0}/{1}/index.json" . format ( self . site_url , api_call ) return "{0}/{1}.json" . format ( self . site_url , api_call )
Build request url .
18,014
def _build_hash_string ( self ) : if self . site_name in SITE_LIST or self . hash_string : if self . username and self . password : try : hash_string = self . hash_string . format ( self . password ) except TypeError : raise PybooruError ( "Pybooru can't add 'password' " "to 'hash_string'" ) self . password_hash = hashlib . sha1 ( hash_string . encode ( 'utf-8' ) ) . hexdigest ( ) else : raise PybooruError ( "Specify the 'username' and 'password' " "parameters of the Pybooru object, for " "setting 'password_hash' attribute." ) else : raise PybooruError ( "Specify the 'hash_string' parameter of the Pybooru" " object, for the functions that requires login." )
Function to build the password hash string .
18,015
def _is_autonomous ( indep , exprs ) : if indep is None : return True for expr in exprs : try : in_there = indep in expr . free_symbols except : in_there = expr . has ( indep ) if in_there : return False return True
Whether the expressions for the dependent variables are autonomous .
18,016
def symmetricsys ( dep_tr = None , indep_tr = None , SuperClass = TransformedSys , ** kwargs ) : if dep_tr is not None : if not callable ( dep_tr [ 0 ] ) or not callable ( dep_tr [ 1 ] ) : raise ValueError ( "Exceptected dep_tr to be a pair of callables" ) if indep_tr is not None : if not callable ( indep_tr [ 0 ] ) or not callable ( indep_tr [ 1 ] ) : raise ValueError ( "Exceptected indep_tr to be a pair of callables" ) class _SymmetricSys ( SuperClass ) : def __init__ ( self , dep_exprs , indep = None , ** inner_kwargs ) : new_kwargs = kwargs . copy ( ) new_kwargs . update ( inner_kwargs ) dep , exprs = zip ( * dep_exprs ) super ( _SymmetricSys , self ) . __init__ ( zip ( dep , exprs ) , indep , dep_transf = list ( zip ( list ( map ( dep_tr [ 0 ] , dep ) ) , list ( map ( dep_tr [ 1 ] , dep ) ) ) ) if dep_tr is not None else None , indep_transf = ( ( indep_tr [ 0 ] ( indep ) , indep_tr [ 1 ] ( indep ) ) if indep_tr is not None else None ) , ** new_kwargs ) @ classmethod def from_callback ( cls , cb , ny = None , nparams = None , ** inner_kwargs ) : new_kwargs = kwargs . copy ( ) new_kwargs . update ( inner_kwargs ) return SuperClass . from_callback ( cb , ny , nparams , dep_transf_cbs = repeat ( dep_tr ) if dep_tr is not None else None , indep_transf_cbs = indep_tr , ** new_kwargs ) return _SymmetricSys
A factory function for creating symmetrically transformed systems .
18,017
def from_other ( cls , ori , ** kwargs ) : for k in cls . _attrs_to_copy + ( 'params' , 'roots' , 'init_indep' , 'init_dep' ) : if k not in kwargs : val = getattr ( ori , k ) if val is not None : kwargs [ k ] = val if 'lower_bounds' not in kwargs and getattr ( ori , 'lower_bounds' ) is not None : kwargs [ 'lower_bounds' ] = ori . lower_bounds if 'upper_bounds' not in kwargs and getattr ( ori , 'upper_bounds' ) is not None : kwargs [ 'upper_bounds' ] = ori . upper_bounds if len ( ori . pre_processors ) > 0 : if 'pre_processors' not in kwargs : kwargs [ 'pre_processors' ] = [ ] kwargs [ 'pre_processors' ] = kwargs [ 'pre_processors' ] + ori . pre_processors if len ( ori . post_processors ) > 0 : if 'post_processors' not in kwargs : kwargs [ 'post_processors' ] = [ ] kwargs [ 'post_processors' ] = ori . post_processors + kwargs [ 'post_processors' ] if 'dep_exprs' not in kwargs : kwargs [ 'dep_exprs' ] = zip ( ori . dep , ori . exprs ) if 'indep' not in kwargs : kwargs [ 'indep' ] = ori . indep instance = cls ( ** kwargs ) for attr in ori . _attrs_to_copy : if attr not in cls . _attrs_to_copy : setattr ( instance , attr , getattr ( ori , attr ) ) return instance
Creates a new instance with an existing one as a template .
18,018
def get_jac ( self ) : if self . _jac is True : if self . sparse is True : self . _jac , self . _colptrs , self . _rowvals = self . be . sparse_jacobian_csc ( self . exprs , self . dep ) elif self . band is not None : self . _jac = self . be . banded_jacobian ( self . exprs , self . dep , * self . band ) else : f = self . be . Matrix ( 1 , self . ny , self . exprs ) self . _jac = f . jacobian ( self . be . Matrix ( 1 , self . ny , self . dep ) ) elif self . _jac is False : return False return self . _jac
Derives the jacobian from self . exprs and self . dep .
18,019
def get_jtimes ( self ) : if self . _jtimes is False : return False if self . _jtimes is True : r = self . be . Dummy ( 'r' ) v = tuple ( self . be . Dummy ( 'v_{0}' . format ( i ) ) for i in range ( self . ny ) ) f = self . be . Matrix ( 1 , self . ny , self . exprs ) f = f . subs ( [ ( x_i , x_i + r * v_i ) for x_i , v_i in zip ( self . dep , v ) ] ) return v , self . be . flatten ( f . diff ( r ) . subs ( r , 0 ) ) else : return tuple ( zip ( * self . _jtimes ) )
Derive the jacobian - vector product from self . exprs and self . dep
18,020
def jacobian_singular(self):
    """Returns True if the Jacobian is singular, else False."""
    cses, (jac_in_cses,) = self.be.cse(self.get_jac())
    # A non-empty nullspace means the Jacobian is rank-deficient.
    return bool(jac_in_cses.nullspace())
Returns True if Jacobian is singular else False .
18,021
def get_dfdx ( self ) : if self . _dfdx is True : if self . indep is None : zero = 0 * self . be . Dummy ( ) ** 0 self . _dfdx = self . be . Matrix ( 1 , self . ny , [ zero ] * self . ny ) else : self . _dfdx = self . be . Matrix ( 1 , self . ny , [ expr . diff ( self . indep ) for expr in self . exprs ] ) elif self . _dfdx is False : return False return self . _dfdx
Calculates 2nd derivatives of self . exprs
18,022
def get_f_ty_callback ( self ) : cb = self . _callback_factory ( self . exprs ) lb = self . lower_bounds ub = self . upper_bounds if lb is not None or ub is not None : def _bounds_wrapper ( t , y , p = ( ) , be = None ) : if lb is not None : if np . any ( y < lb - 10 * self . _current_integration_kwargs [ 'atol' ] ) : raise RecoverableError y = np . array ( y ) y [ y < lb ] = lb [ y < lb ] if ub is not None : if np . any ( y > ub + 10 * self . _current_integration_kwargs [ 'atol' ] ) : raise RecoverableError y = np . array ( y ) y [ y > ub ] = ub [ y > ub ] return cb ( t , y , p , be ) return _bounds_wrapper else : return cb
Generates a callback for evaluating self . exprs .
18,023
def get_j_ty_callback ( self ) : j_exprs = self . get_jac ( ) if j_exprs is False : return None cb = self . _callback_factory ( j_exprs ) if self . sparse : from scipy . sparse import csc_matrix def sparse_cb ( x , y , p = ( ) ) : data = cb ( x , y , p ) . flatten ( ) return csc_matrix ( ( data , self . _rowvals , self . _colptrs ) ) return sparse_cb else : return cb
Generates a callback for evaluating the jacobian .
18,024
def get_dfdx_callback ( self ) : dfdx_exprs = self . get_dfdx ( ) if dfdx_exprs is False : return None return self . _callback_factory ( dfdx_exprs )
Generate a callback for evaluating derivative of self . exprs
18,025
def get_jtimes_callback ( self ) : jtimes = self . get_jtimes ( ) if jtimes is False : return None v , jtimes_exprs = jtimes return _Callback ( self . indep , tuple ( self . dep ) + tuple ( v ) , self . params , jtimes_exprs , Lambdify = self . be . Lambdify )
Generate a callback for evaluating the jacobian - vector product .
18,026
def from_linear_invariants ( cls , ori_sys , preferred = None , ** kwargs ) : _be = ori_sys . be A = _be . Matrix ( ori_sys . linear_invariants ) rA , pivots = A . rref ( ) if len ( pivots ) < A . shape [ 0 ] : raise NotImplementedError ( "Linear invariants contain linear dependencies." ) per_row_cols = [ ( ri , [ ci for ci in range ( A . cols ) if A [ ri , ci ] != 0 ] ) for ri in range ( A . rows ) ] if preferred is None : preferred = ori_sys . names [ : A . rows ] if ori_sys . dep_by_name else list ( range ( A . rows ) ) targets = [ ori_sys . names . index ( dep ) if ori_sys . dep_by_name else ( dep if isinstance ( dep , int ) else ori_sys . dep . index ( dep ) ) for dep in preferred ] row_tgt = [ ] for ri , colids in sorted ( per_row_cols , key = lambda k : len ( k [ 1 ] ) ) : for tgt in targets : if tgt in colids : row_tgt . append ( ( ri , tgt ) ) targets . remove ( tgt ) break if len ( targets ) == 0 : break else : raise ValueError ( "Could not find a solutions for: %s" % targets ) def analytic_factory ( x0 , y0 , p0 , be ) : return { ori_sys . dep [ tgt ] : y0 [ ori_sys . dep [ tgt ] if ori_sys . dep_by_name else tgt ] - sum ( [ A [ ri , ci ] * ( ori_sys . dep [ ci ] - y0 [ ori_sys . dep [ ci ] if ori_sys . dep_by_name else ci ] ) for ci in range ( A . cols ) if ci != tgt ] ) / A [ ri , tgt ] for ri , tgt in row_tgt } ori_li_nms = ori_sys . linear_invariant_names or ( ) new_lin_invar = [ [ cell for ci , cell in enumerate ( row ) if ci not in list ( zip ( * row_tgt ) ) [ 1 ] ] for ri , row in enumerate ( A . tolist ( ) ) if ri not in list ( zip ( * row_tgt ) ) [ 0 ] ] new_lin_i_nms = [ nam for ri , nam in enumerate ( ori_li_nms ) if ri not in list ( zip ( * row_tgt ) ) [ 0 ] ] return cls ( ori_sys , analytic_factory , linear_invariants = new_lin_invar , linear_invariant_names = new_lin_i_nms , ** kwargs )
Reformulates the ODE system in fewer variables .
18,027
def chained_parameter_variation ( subject , durations , y0 , varied_params , default_params = None , integrate_kwargs = None , x0 = None , npoints = 1 , numpy = None ) : assert len ( durations ) > 0 , 'need at least 1 duration (preferably many)' assert npoints > 0 , 'need at least 1 point per duration' for k , v in varied_params . items ( ) : if len ( v ) != len ( durations ) : raise ValueError ( "Mismathced lengths of durations and varied_params" ) if isinstance ( subject , ODESys ) : integrate = subject . integrate numpy = numpy or subject . numpy else : integrate = subject numpy = numpy or np default_params = default_params or { } integrate_kwargs = integrate_kwargs or { } def _get_idx ( cont , idx ) : if isinstance ( cont , dict ) : return { k : ( v [ idx ] if hasattr ( v , '__len__' ) and getattr ( v , 'ndim' , 1 ) > 0 else v ) for k , v in cont . items ( ) } else : return cont [ idx ] durations = numpy . cumsum ( durations ) for idx_dur in range ( len ( durations ) ) : params = copy . copy ( default_params ) for k , v in varied_params . items ( ) : params [ k ] = v [ idx_dur ] if idx_dur == 0 : if x0 is None : x0 = durations [ 0 ] * 0 out = integrate ( numpy . linspace ( x0 , durations [ 0 ] , npoints + 1 ) , y0 , params , ** integrate_kwargs ) else : if isinstance ( out , Result ) : out . extend_by_integration ( durations [ idx_dur ] , params , npoints = npoints , ** integrate_kwargs ) else : for idx_res , r in enumerate ( out ) : r . extend_by_integration ( durations [ idx_dur ] , _get_idx ( params , idx_res ) , npoints = npoints , ** integrate_kwargs ) return out
Integrate an ODE - system for a series of durations with some parameters changed in - between
18,028
def pre_process ( self , xout , y0 , params = ( ) ) : for pre_processor in self . pre_processors : xout , y0 , params = pre_processor ( xout , y0 , params ) return [ self . numpy . atleast_1d ( arr ) for arr in ( xout , y0 , params ) ]
Transforms input to internal values used internally .
18,029
def post_process ( self , xout , yout , params ) : for post_processor in self . post_processors : xout , yout , params = post_processor ( xout , yout , params ) return xout , yout , params
Transforms internal values to output used internally .
18,030
def adaptive(self, y0, x0, xend, params=(), **kwargs):
    """Integrate over ``(x0, xend)`` with integrator-chosen output points."""
    x_span = (x0, xend)
    return self.integrate(x_span, y0, params=params, **kwargs)
Integrate with integrator chosen output .
18,031
def predefined(self, y0, xout, params=(), **kwargs):
    """Integrate with user-chosen output points ``xout``."""
    result = self.integrate(xout, y0, params=params,
                            force_predefined=True, **kwargs)
    xout, yout, info = result
    return yout, info
Integrate with user chosen output .
18,032
def integrate ( self , x , y0 , params = ( ) , atol = 1e-8 , rtol = 1e-8 , ** kwargs ) : arrs = self . to_arrays ( x , y0 , params ) _x , _y , _p = _arrs = self . pre_process ( * arrs ) ndims = [ a . ndim for a in _arrs ] if ndims == [ 1 , 1 , 1 ] : twodim = False elif ndims == [ 2 , 2 , 2 ] : twodim = True else : raise ValueError ( "Pre-processor made ndims inconsistent?" ) if self . append_iv : _p = self . numpy . concatenate ( ( _p , _y ) , axis = - 1 ) if hasattr ( self , 'ny' ) : if _y . shape [ - 1 ] != self . ny : raise ValueError ( "Incorrect shape of intern_y0" ) if isinstance ( atol , dict ) : kwargs [ 'atol' ] = [ atol [ k ] for k in self . names ] else : kwargs [ 'atol' ] = atol kwargs [ 'rtol' ] = rtol integrator = kwargs . pop ( 'integrator' , None ) if integrator is None : integrator = os . environ . get ( 'PYODESYS_INTEGRATOR' , 'scipy' ) args = tuple ( map ( self . numpy . atleast_2d , ( _x , _y , _p ) ) ) self . _current_integration_kwargs = kwargs if isinstance ( integrator , str ) : nfo = getattr ( self , '_integrate_' + integrator ) ( * args , ** kwargs ) else : kwargs [ 'with_jacobian' ] = getattr ( integrator , 'with_jacobian' , None ) nfo = self . _integrate ( integrator . integrate_adaptive , integrator . integrate_predefined , * args , ** kwargs ) if twodim : _xout = [ d [ 'internal_xout' ] for d in nfo ] _yout = [ d [ 'internal_yout' ] for d in nfo ] _params = [ d [ 'internal_params' ] for d in nfo ] res = [ Result ( * ( self . post_process ( _xout [ i ] , _yout [ i ] , _params [ i ] ) + ( nfo [ i ] , self ) ) ) for i in range ( len ( nfo ) ) ] else : _xout = nfo [ 0 ] [ 'internal_xout' ] _yout = nfo [ 0 ] [ 'internal_yout' ] self . _internal = _xout . copy ( ) , _yout . copy ( ) , _p . copy ( ) nfo = nfo [ 0 ] res = Result ( * ( self . post_process ( _xout , _yout , _p ) + ( nfo , self ) ) ) return res
Integrate the system of ordinary differential equations .
18,033
def plot_phase_plane ( self , indices = None , ** kwargs ) : return self . _plot ( plot_phase_plane , indices = indices , ** kwargs )
Plots a phase portrait from last integration .
18,034
def user_can_edit_news ( user ) : newsitem_models = [ model . get_newsitem_model ( ) for model in NEWSINDEX_MODEL_CLASSES ] if user . is_active and user . is_superuser : return bool ( newsitem_models ) for NewsItem in newsitem_models : for perm in format_perms ( NewsItem , [ 'add' , 'change' , 'delete' ] ) : if user . has_perm ( perm ) : return True return False
Check if the user has permission to edit any of the registered NewsItem types .
18,035
def user_can_edit_newsitem ( user , NewsItem ) : for perm in format_perms ( NewsItem , [ 'add' , 'change' , 'delete' ] ) : if user . has_perm ( perm ) : return True return False
Check if the user has permission to edit a particular NewsItem type .
18,036
def get_date_or_404(year, month, day):
    """Try to make a date from the given inputs, raising Http404 on error."""
    try:
        parsed = datetime.date(int(year), int(month), int(day))
    except ValueError:
        raise Http404
    return parsed
Try to make a date from the given inputs raising Http404 on error
18,037
def respond(self, request, view, newsitems, extra_context=None):
    """A helper that takes some news items and returns an HttpResponse.

    Parameters:
        request: The current HttpRequest.
        view: The view on whose behalf the response is rendered.
        newsitems: Iterable of news items to paginate into the context.
        extra_context: Optional dict merged into the template context.
    """
    # BUG FIX: the mutable default ``extra_context={}`` is shared across
    # calls; use None as the sentinel. Explicitly-passed dicts still work.
    context = self.get_context(request, view=view)
    context.update(self.paginate_newsitems(request, newsitems))
    context.update(extra_context or {})
    template = self.get_template(request, view=view)
    return TemplateResponse(request, template, context)
A helper that takes some news items and returns an HttpResponse
18,038
def get_newsitem_model(model_string):
    """Get the NewsItem model from a model string.

    Raises ValueError if the model string is invalid or references a
    model that is not a NewsItem.
    """
    try:
        NewsItem = apps.get_model(model_string)
        assert issubclass(NewsItem, AbstractNewsItem)
    except (ValueError, LookupError, AssertionError):
        # BUG FIX: the message had no '{}' placeholder, so .format()
        # silently dropped the offending model string from the error.
        raise ValueError(
            'Invalid news item model string: {}'.format(model_string))
    return NewsItem
Get the NewsItem model from a model string . Raises ValueError if the model string is invalid or references a model that is not a NewsItem .
18,039
def from_quad_tree ( cls , quad_tree ) : assert bool ( re . match ( '^[0-3]*$' , quad_tree ) ) , 'QuadTree value can only consists of the digits 0, 1, 2 and 3.' zoom = len ( str ( quad_tree ) ) offset = int ( math . pow ( 2 , zoom ) ) - 1 google_x , google_y = [ reduce ( lambda result , bit : ( result << 1 ) | bit , bits , 0 ) for bits in zip ( * ( reversed ( divmod ( digit , 2 ) ) for digit in ( int ( c ) for c in str ( quad_tree ) ) ) ) ] return cls ( tms_x = google_x , tms_y = ( offset - google_y ) , zoom = zoom )
Creates a tile from a Microsoft QuadTree
18,040
def from_google(cls, google_x, google_y, zoom):
    """Creates a tile from Google-format X, Y and zoom."""
    max_tile = (2 ** zoom) - 1
    assert 0 <= google_x <= max_tile, 'Google X needs to be a value between 0 and (2^zoom) -1.'
    assert 0 <= google_y <= max_tile, 'Google Y needs to be a value between 0 and (2^zoom) -1.'
    # TMS counts the y axis from the bottom, Google from the top.
    tms_y = max_tile - google_y
    return cls(tms_x=google_x, tms_y=tms_y, zoom=zoom)
Creates a tile from Google format X Y and zoom
18,041
def for_point ( cls , point , zoom ) : latitude , longitude = point . latitude_longitude return cls . for_latitude_longitude ( latitude = latitude , longitude = longitude , zoom = zoom )
Creates a tile for given point
18,042
def quad_tree(self):
    """Gets the tile in the Microsoft QuadTree format, converted from TMS."""
    tms_x, tms_y = self.tms
    # Flip the y axis: QuadTree counts from the top, TMS from the bottom.
    flipped_y = (2 ** self.zoom - 1) - tms_y
    digits = []
    for level in range(self.zoom, 0, -1):
        mask = 1 << (level - 1)
        digit = 0
        if tms_x & mask:
            digit += 1
        if flipped_y & mask:
            digit += 2
        digits.append(str(digit))
    return ''.join(digits)
Gets the tile in the Microsoft QuadTree format converted from TMS
18,043
def google(self):
    """Gets the tile in the Google format, converted from TMS."""
    tms_x, tms_y = self.tms
    # Google's y axis runs top-down, TMS bottom-up.
    flipped_y = (2 ** self.zoom - 1) - tms_y
    return tms_x, flipped_y
Gets the tile in the Google format converted from TMS
18,044
def bounds ( self ) : google_x , google_y = self . google pixel_x_west , pixel_y_north = google_x * TILE_SIZE , google_y * TILE_SIZE pixel_x_east , pixel_y_south = ( google_x + 1 ) * TILE_SIZE , ( google_y + 1 ) * TILE_SIZE point_min = Point . from_pixel ( pixel_x = pixel_x_west , pixel_y = pixel_y_south , zoom = self . zoom ) point_max = Point . from_pixel ( pixel_x = pixel_x_east , pixel_y = pixel_y_north , zoom = self . zoom ) return point_min , point_max
Gets the bounds of a tile represented as the most west and south point and the most east and north point
18,045
def read_ix ( ix , ** kwargs ) : if not isinstance ( ix , ixmp . TimeSeries ) : error = 'not recognized as valid ixmp class: {}' . format ( ix ) raise ValueError ( error ) df = ix . timeseries ( iamc = False , ** kwargs ) df [ 'model' ] = ix . model df [ 'scenario' ] = ix . scenario return df , 'year' , [ ]
Read timeseries data from an ixmp object
18,046
def requires_package(pkg, msg, error_type=ImportError):
    """Decorator for when a function requires an optional dependency.

    Parameters:
        pkg: The imported module, or None if the import failed.
        msg: Error message raised when ``pkg`` is None.
        error_type: Exception class to raise (default: ImportError).
    """
    from functools import wraps

    def _requires_package(func):
        # IMPROVEMENT: functools.wraps preserves the wrapped function's
        # __name__/__doc__, which the original wrapper discarded.
        @wraps(func)
        def wrapper(*args, **kwargs):
            if pkg is None:
                raise error_type(msg)
            return func(*args, **kwargs)
        return wrapper
    return _requires_package
Decorator when a function requires an optional dependency
18,047
def write_sheet ( writer , name , df , index = False ) : if index : df = df . reset_index ( ) df . to_excel ( writer , name , index = False ) worksheet = writer . sheets [ name ] for i , col in enumerate ( df . columns ) : if df . dtypes [ col ] . name . startswith ( ( 'float' , 'int' ) ) : width = len ( str ( col ) ) + 2 else : width = max ( [ df [ col ] . map ( lambda x : len ( str ( x or 'None' ) ) ) . max ( ) , len ( col ) ] ) + 2 xls_col = '{c}:{c}' . format ( c = NUMERIC_TO_STR [ i ] ) worksheet . set_column ( xls_col , width )
Write a pandas DataFrame to an ExcelWriter auto - formatting column width depending on maxwidth of data and column header
18,048
def read_pandas ( fname , * args , ** kwargs ) : if not os . path . exists ( fname ) : raise ValueError ( 'no data file `{}` found!' . format ( fname ) ) if fname . endswith ( 'csv' ) : df = pd . read_csv ( fname , * args , ** kwargs ) else : xl = pd . ExcelFile ( fname ) if len ( xl . sheet_names ) > 1 and 'sheet_name' not in kwargs : kwargs [ 'sheet_name' ] = 'data' df = pd . read_excel ( fname , * args , ** kwargs ) return df
Read a file and return a pd . DataFrame
18,049
def sort_data(data, cols):
    """Sort data rows and order columns."""
    ordered = data.sort_values(cols)[cols + ['value']]
    return ordered.reset_index(drop=True)
Sort data rows and order columns
18,050
def _escape_regexp ( s ) : return ( str ( s ) . replace ( '|' , '\\|' ) . replace ( '.' , '\.' ) . replace ( '*' , '.*' ) . replace ( '+' , '\+' ) . replace ( '(' , '\(' ) . replace ( ')' , '\)' ) . replace ( '$' , '\\$' ) )
escape characters with specific regexp use
18,051
def years_match(data, years):
    """Matching of year columns for data filtering.

    Parameters:
        data: pd.Series of years to test for membership.
        years: int or list of ints to match against.

    Raises TypeError when datetimes are passed (filter `time` instead).
    """
    years = [years] if isinstance(years, int) else years
    dt = datetime.datetime
    # BUG FIX: guard ``years[0]`` so an empty list no longer raises
    # IndexError (an empty filter simply matches nothing).
    if isinstance(years, dt) or (len(years) > 0 and isinstance(years[0], dt)):
        error_msg = "`year` can only be filtered with ints or lists of ints"
        raise TypeError(error_msg)
    return data.isin(years)
matching of year columns for data filtering
18,052
def hour_match(data, hours):
    """Matching of hours in time columns for data filtering."""
    if isinstance(hours, int):
        hours = [hours]
    return data.isin(hours)
matching of days in time columns for data filtering
18,053
def datetime_match ( data , dts ) : dts = dts if islistable ( dts ) else [ dts ] if any ( [ not isinstance ( i , datetime . datetime ) for i in dts ] ) : error_msg = ( "`time` can only be filtered by datetimes" ) raise TypeError ( error_msg ) return data . isin ( dts )
matching of datetimes in time columns for data filtering
18,054
def to_int(x, index=False):
    """Formatting series or timeseries columns to int and checking validity.

    If index=False the function validates the values of the pd.Series x
    and returns it; else it casts the index of x to int and returns x
    with the new index.
    """
    values = x.index if index else x
    as_int = list(map(int, values))
    # Any entry that changed under int() is invalid (e.g. 1.5 -> 1).
    invalid = values[as_int != values]
    if not invalid.empty:
        raise ValueError('invalid values `{}`'.format(list(invalid)))
    if index:
        x.index = as_int
        return x
    return values
Formatting series or timeseries columns to int and checking validity . If index = False the function works on the pd . Series x ; else the function casts the index of x to int and returns x with a new index .
18,055
def concat_with_pipe(x, cols=None):
    """Concatenate a pd.Series separated by |, dropping None or np.nan."""
    cols = cols or x.index

    # BUG FIX: `x[i] not in [None, np.nan]` only filtered the np.nan
    # singleton (NaN compares unequal to itself, so `in` falls back to
    # an identity check); any other NaN, e.g. float('nan'), slipped
    # through and broke the join. Test NaN via self-inequality instead.
    def _keep(value):
        if value is None:
            return False
        if isinstance(value, float) and value != value:  # NaN
            return False
        return True

    return '|'.join([x[i] for i in cols if _keep(x[i])])
Concatenate a pd . Series separated by | drop None or np . nan
18,056
def _make_index ( df , cols = META_IDX ) : return pd . MultiIndex . from_tuples ( pd . unique ( list ( zip ( * [ df [ col ] for col in cols ] ) ) ) , names = tuple ( cols ) )
Create an index from the columns of a dataframe
18,057
def check_aggregate ( df , variable , components = None , exclude_on_fail = False , multiplier = 1 , ** kwargs ) : fdf = df . filter ( ** kwargs ) if len ( fdf . data ) > 0 : vdf = fdf . check_aggregate ( variable = variable , components = components , exclude_on_fail = exclude_on_fail , multiplier = multiplier ) df . meta [ 'exclude' ] |= fdf . meta [ 'exclude' ] return vdf
Check whether the timeseries values match the aggregation of sub - categories
18,058
def filter_by_meta ( data , df , join_meta = False , ** kwargs ) : if not set ( META_IDX ) . issubset ( data . index . names + list ( data . columns ) ) : raise ValueError ( 'missing required index dimensions or columns!' ) meta = pd . DataFrame ( df . meta [ list ( set ( kwargs ) - set ( META_IDX ) ) ] . copy ( ) ) keep = np . array ( [ True ] * len ( meta ) ) apply_filter = False for col , values in kwargs . items ( ) : if col in META_IDX and values is not None : _col = meta . index . get_level_values ( 0 if col is 'model' else 1 ) keep &= pattern_match ( _col , values , has_nan = False ) apply_filter = True elif values is not None : keep &= pattern_match ( meta [ col ] , values ) apply_filter |= values is not None meta = meta [ keep ] data = data . copy ( ) idx = list ( data . index . names ) if not data . index . names == [ None ] else None data = data . reset_index ( ) . set_index ( META_IDX ) meta = meta . loc [ meta . index . intersection ( data . index ) ] meta . index . names = META_IDX if apply_filter : data = data . loc [ meta . index ] data . index . names = META_IDX data = data . join ( meta ) if join_meta else data data = data . reset_index ( ) . set_index ( idx or 'index' ) if idx is None : data . index . name = None return data
Filter by and join meta columns from an IamDataFrame to a pd . DataFrame
18,059
def compare ( left , right , left_label = 'left' , right_label = 'right' , drop_close = True , ** kwargs ) : ret = pd . concat ( { right_label : right . data . set_index ( right . _LONG_IDX ) , left_label : left . data . set_index ( left . _LONG_IDX ) } , axis = 1 ) ret . columns = ret . columns . droplevel ( 1 ) if drop_close : ret = ret [ ~ np . isclose ( ret [ left_label ] , ret [ right_label ] , ** kwargs ) ] return ret [ [ right_label , left_label ] ]
Compare the data in two IamDataFrames and return a pd . DataFrame
18,060
def concat ( dfs ) : if isstr ( dfs ) or not hasattr ( dfs , '__iter__' ) : msg = 'Argument must be a non-string iterable (e.g., list or tuple)' raise TypeError ( msg ) _df = None for df in dfs : df = df if isinstance ( df , IamDataFrame ) else IamDataFrame ( df ) if _df is None : _df = copy . deepcopy ( df ) else : _df . append ( df , inplace = True ) return _df
Concatenate a series of pyam . IamDataFrame - like objects together
18,061
def variables ( self , include_units = False ) : if include_units : return self . data [ [ 'variable' , 'unit' ] ] . drop_duplicates ( ) . reset_index ( drop = True ) . sort_values ( 'variable' ) else : return pd . Series ( self . data . variable . unique ( ) , name = 'variable' )
Get a list of variables
18,062
def append(self, other, ignore_meta_conflict=False, inplace=False, **kwargs):
    """Append any castable object to this IamDataFrame.

    Columns in `other.meta` that are not in `self.meta` are always merged;
    duplicate region-variable-unit-year rows raise a ValueError.

    Parameters
    ----------
    other : IamDataFrame or castable object
        data to append
    ignore_meta_conflict : bool, default False
        if False, raise on conflicting meta for overlapping scenarios
        (forced to True when `other` is cast here)
    inplace : bool, default False
        if True, modify this object in place; else return a modified copy
    kwargs
        passed to the IamDataFrame constructor when casting `other`
    """
    if not isinstance(other, IamDataFrame):
        other = IamDataFrame(other, **kwargs)
        ignore_meta_conflict = True

    # fix: compare strings with `!=`, not `is not` (identity comparison
    # is unreliable for strings)
    if self.time_col != other.time_col:
        raise ValueError('incompatible time format (years vs. datetime)!')

    ret = copy.deepcopy(self) if not inplace else self

    diff = other.meta.index.difference(ret.meta.index)
    intersect = other.meta.index.intersection(ret.meta.index)

    # merge `other.meta` columns not in `self.meta` for overlapping scenarios
    if not intersect.empty:
        # raise on conflicting values in shared meta columns
        if not ignore_meta_conflict:
            cols = [i for i in other.meta.columns if i in ret.meta.columns]
            if not ret.meta.loc[intersect, cols].equals(
                    other.meta.loc[intersect, cols]):
                conflict_idx = (
                    pd.concat([ret.meta.loc[intersect, cols],
                               other.meta.loc[intersect, cols]])
                    .drop_duplicates().index.drop_duplicates())
                msg = 'conflict in `meta` for scenarios {}'.format(
                    [i for i in pd.DataFrame(index=conflict_idx).index])
                raise ValueError(msg)

        cols = [i for i in other.meta.columns if i not in ret.meta.columns]
        _meta = other.meta.loc[intersect, cols]
        ret.meta = ret.meta.merge(_meta, how='outer',
                                  left_index=True, right_index=True)

    # append `other.meta` rows for new scenarios
    if not diff.empty:
        # `sort` kwarg only supported from pandas 0.23 onwards
        sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \
            else dict(sort=False)
        ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg)

    # append `other.data`; `verify_integrity` raises on duplicate rows
    _data = ret.data.set_index(ret._LONG_IDX).append(
        other.data.set_index(other._LONG_IDX), verify_integrity=True)

    # merge extra columns and rebuild the long index
    ret.extra_cols += [i for i in other.extra_cols if i not in ret.extra_cols]
    ret._LONG_IDX = IAMC_IDX + [ret.time_col] + ret.extra_cols
    ret.data = sort_data(_data.reset_index(), ret._LONG_IDX)

    if not inplace:
        return ret
Append any castable object to this IamDataFrame. Columns in `other.meta` that are not in `self.meta` are always merged; duplicate region-variable-unit-year rows raise a ValueError.
18,063
def pivot_table(self, index, columns, values='value', aggfunc='count',
                fill_value=None, style=None):
    """Return a pivot table of the data.

    Parameters
    ----------
    index : str or list of str
        rows of the pivot table
    columns : str or list of str
        columns of the pivot table
    values : str, default 'value'
        dataframe column to aggregate
    aggfunc : str or function, default 'count'
        the strings 'count', 'mean' and 'sum' get special handling below
    fill_value : scalar, default None
        value to substitute for missing entries
    style : str, default None
        'heatmap' switches the fill value for mean/sum to 0
    """
    index = [index] if isstr(index) else index
    columns = [columns] if isstr(columns) else columns

    df = self.data

    # allow passing aggfunc as a string for convenience
    if isstr(aggfunc):
        if aggfunc == 'count':
            df = self.data.groupby(index + columns, as_index=False).count()
            fill_value = 0
        elif aggfunc == 'mean':
            # mean is pre-computed (rounded to 2 digits), then summed in
            # the pivot - each group has a single pre-aggregated row
            df = self.data.groupby(index + columns,
                                   as_index=False).mean().round(2)
            aggfunc = np.sum
            fill_value = 0 if style == 'heatmap' else ""
        elif aggfunc == 'sum':
            aggfunc = np.sum
            fill_value = 0 if style == 'heatmap' else ""

    df = df.pivot_table(values=values, index=index, columns=columns,
                        aggfunc=aggfunc, fill_value=fill_value)
    return df
Returns a pivot table
18,064
def as_pandas(self, with_metadata=False):
    """Return this as a pd.DataFrame.

    Parameters
    ----------
    with_metadata : bool or dict, default False
        if True, join all meta columns; if a dict, the columns to join
        are discovered via `_discover_meta_cols(**with_metadata)`
    """
    if with_metadata:
        cols = self._discover_meta_cols(**with_metadata) \
            if isinstance(with_metadata, dict) else self.meta.columns
        return (self.data
                .set_index(META_IDX)
                .join(self.meta[cols])
                .reset_index())
    else:
        return self.data.copy()
Return this as a pd . DataFrame
18,065
def _new_meta_column(self, name):
    """Add column `name` to `meta` (initialized to np.nan) if missing.

    Raises
    ------
    ValueError
        if `name` is None
    """
    if name is None:
        raise ValueError('cannot add a meta column `{}`'.format(name))
    if name in self.meta:
        return
    self.meta[name] = np.nan
Add a column to `meta` (if it doesn't exist), initialized to `np.nan`.
18,066
def convert_unit(self, conversion_mapping, inplace=False):
    """Convert units based on provided unit conversion factors.

    Parameters
    ----------
    conversion_mapping : dict
        mapping of the form `{current_unit: (new_unit, factor)}`
    inplace : bool, default False
        if True, modify this object in place; else return a modified copy
    """
    ret = self if inplace else copy.deepcopy(self)
    for old_unit, (new_unit, factor) in conversion_mapping.items():
        rows = ret.data['unit'] == old_unit
        ret.data.loc[rows, 'value'] *= pd.to_numeric(factor)
        ret.data.loc[rows, 'unit'] = new_unit
    if not inplace:
        return ret
Converts units based on provided unit conversion factors
18,067
def normalize(self, inplace=False, **kwargs):
    """Normalize data to a given value.

    Currently only supports normalizing to a specific time.

    Parameters
    ----------
    inplace : bool, default False
        if True, modify this object in place; else return a modified copy
    kwargs
        exactly one entry keyed by the time column (e.g. `year=2005`),
        giving the value to normalize to

    Raises
    ------
    ValueError
        if anything other than a single time-based criterion is given
    """
    if len(kwargs) > 1 or self.time_col not in kwargs:
        raise ValueError('Only time(year)-based normalization supported')
    ret = copy.deepcopy(self) if not inplace else self
    df = ret.data
    # the time column and reference value to normalize against
    cols = self.time_col
    value = kwargs[self.time_col]
    x = df.set_index(IAMC_IDX)
    # divide each timeseries by its value at the reference time
    x['value'] /= x[x[cols] == value]['value']
    ret.data = x.reset_index()
    if not inplace:
        return ret
Normalize data to a given value . Currently only supports normalizing to a specific time .
18,068
def aggregate(self, variable, components=None, append=False):
    """Compute the aggregate of timeseries components or sub-categories.

    Parameters
    ----------
    variable : str
        variable for which the aggregate is computed
    components : list of str, default None
        list of component variables; defaults to all sub-categories
        of `variable`
    append : bool, default False
        append the aggregate to `self` instead of returning it
    """
    components = components or self._variable_components(variable)

    # nothing to aggregate; log and return None
    if not len(components):
        msg = 'cannot aggregate variable `{}` because it has no components'
        logger().info(msg.format(variable))
        return

    rows = self._apply_filters(variable=components)
    _data = _aggregate(self.data[rows], 'variable')

    if append is True:
        self.append(_data, variable=variable, inplace=True)
    else:
        return _data
Compute the aggregate of timeseries components or sub - categories
18,069
def check_aggregate(self, variable, components=None, exclude_on_fail=False,
                    multiplier=1, **kwargs):
    """Check whether a timeseries matches the aggregation of its components.

    Parameters
    ----------
    variable : str
        variable to check
    components : list of str, default None
        defaults to all sub-categories of `variable`
    exclude_on_fail : bool, default False
        flag scenarios failing the check via `_exclude_on_fail`
    multiplier : number, default 1
        factor applied to the sum of components before comparing
    kwargs
        passed to `np.isclose` (e.g. `rtol`, `atol`)

    Returns
    -------
    timeseries of inconsistent rows, or None if consistent
    """
    # compute the aggregate from components; None means no components
    df_components = self.aggregate(variable, components)
    if df_components is None:
        return

    # align the reported variable with the component aggregate
    rows = self._apply_filters(variable=variable)
    df_variable, df_components = (
        _aggregate(self.data[rows], 'variable').align(df_components))

    # rows where reported and aggregated values are not numerically close
    diff = df_variable[~np.isclose(df_variable,
                                   multiplier * df_components, **kwargs)]

    if len(diff):
        msg = '`{}` - {} of {} rows are not aggregates of components'
        logger().info(msg.format(variable, len(diff), len(df_variable)))

        if exclude_on_fail:
            self._exclude_on_fail(diff.index.droplevel([2, 3, 4]))

        return IamDataFrame(diff, variable=variable).timeseries()
Check whether a timeseries matches the aggregation of its components
18,070
def aggregate_region(self, variable, region='World', subregions=None,
                     components=None, append=False):
    """Compute the aggregate of timeseries over a number of regions,
    including variable components only defined at the `region` level.

    Parameters
    ----------
    variable : str
        variable for which the aggregate is computed
    region : str, default 'World'
        region to which the data is aggregated
    subregions : list of str, default None
        defaults to all regions (other than `region`) where `variable`
        is defined
    components : list of str, default None
        defaults to components defined at the `region` level only
    append : bool, default False
        append the aggregate to `self` instead of returning it
    """
    # default subregions to all regions other than `region`
    if subregions is None:
        rows = self._apply_filters(variable=variable)
        subregions = set(self.data[rows].region) - set([region])

    if not len(subregions):
        msg = 'cannot aggregate variable `{}` to `{}` because it does not'\
            ' exist in any subregion'
        logger().info(msg.format(variable, region))
        return

    # compute the aggregate over all subregions
    subregion_df = self.filter(region=subregions)
    cols = ['region', 'variable']
    _data = _aggregate(subregion_df.filter(variable=variable).data, cols)

    # add components defined at the `region` level; defaults to variables
    # not defined in any subregion
    region_df = self.filter(region=region)
    components = components or (
        set(region_df._variable_components(variable)).difference(
            subregion_df._variable_components(variable)))

    if len(components):
        rows = region_df._apply_filters(variable=components)
        _data = _data.add(_aggregate(region_df.data[rows], cols),
                          fill_value=0)

    if append is True:
        self.append(_data, region=region, variable=variable, inplace=True)
    else:
        return _data
Compute the aggregate of timeseries over a number of regions including variable components only defined at the region level
18,071
def check_aggregate_region(self, variable, region='World', subregions=None,
                           components=None, exclude_on_fail=False, **kwargs):
    """Check whether the region timeseries data match the aggregation
    of components.

    Parameters
    ----------
    variable : str
        variable to check
    region : str, default 'World'
        region to check against the aggregation of its subregions
    subregions : list of str, default None
        defaults to all regions other than `region`
    components : list of str, default None
        defaults to components defined at the `region` level only
    exclude_on_fail : bool, default False
        flag scenarios failing the check via `_exclude_on_fail`
    kwargs
        passed to `np.isclose` (e.g. `rtol`, `atol`)

    Returns
    -------
    timeseries of inconsistent rows, or None if consistent
    """
    # compute the aggregate from subregions; None means nothing to check
    df_subregions = self.aggregate_region(variable, region, subregions,
                                          components)
    if df_subregions is None:
        return

    # align the reported region data with the subregion aggregate
    rows = self._apply_filters(region=region, variable=variable)
    df_region, df_subregions = (
        _aggregate(self.data[rows], ['region', 'variable'])
        .align(df_subregions))

    diff = df_region[~np.isclose(df_region, df_subregions, **kwargs)]

    if len(diff):
        msg = ('`{}` - {} of {} rows are not aggregates of subregions')
        logger().info(msg.format(variable, len(diff), len(df_region)))

        if exclude_on_fail:
            self._exclude_on_fail(diff.index.droplevel([2, 3]))

        col_args = dict(region=region, variable=variable)
        return IamDataFrame(diff, **col_args).timeseries()
Check whether the region timeseries data match the aggregation of components
18,072
def check_internal_consistency(self, **kwargs):
    """Check whether the database is internally consistent.

    Runs `check_aggregate` and `check_aggregate_region` for every
    variable; returns a mapping from '<variable>-aggregate' /
    '<variable>-regional' to the inconsistent rows, or None if the
    database is fully consistent.

    Parameters
    ----------
    kwargs
        passed to the individual consistency checks
    """
    inconsistent = {}
    for var in self.variables():
        agg_diff = self.check_aggregate(var, **kwargs)
        if agg_diff is not None:
            inconsistent[var + "-aggregate"] = agg_diff
        reg_diff = self.check_aggregate_region(var, **kwargs)
        if reg_diff is not None:
            inconsistent[var + "-regional"] = reg_diff
    return inconsistent if inconsistent else None
Check whether the database is internally consistent
18,073
def _apply_filters(self, **filters):
    """Determine rows to keep in `data` for a given set of filters.

    Parameters
    ----------
    filters : dict
        dimension names mapped to allowed values; entries with value
        None are ignored. `regexp=True` treats patterns as raw regular
        expressions.

    Returns
    -------
    boolean array aligned with `self.data`
    """
    regexp = filters.pop('regexp', False)
    keep = np.array([True] * len(self.data))

    # filter by columns and list of values
    for col, values in filters.items():
        if values is None:
            continue

        if col in self.meta.columns:
            # filter on a meta column: map matching scenarios to data rows
            matches = pattern_match(self.meta[col], values, regexp=regexp)
            cat_idx = self.meta[matches].index
            keep_col = (self.data[META_IDX].set_index(META_IDX)
                        .index.isin(cat_idx))

        elif col == 'variable':
            level = filters['level'] if 'level' in filters else None
            keep_col = pattern_match(self.data[col], values, level, regexp)

        elif col == 'year':
            # fix: compare strings with `!=`/`==`, not `is not`/`is`
            # (identity comparison is unreliable for strings)
            _data = self.data[col] if self.time_col != 'time' \
                else self.data['time'].apply(lambda x: x.year)
            keep_col = years_match(_data, values)

        elif col == 'month' and self.time_col == 'time':
            keep_col = month_match(
                self.data['time'].apply(lambda x: x.month), values)

        elif col == 'day' and self.time_col == 'time':
            # strings (or a list of strings) are interpreted as weekdays
            if isinstance(values, str):
                wday = True
            elif isinstance(values, list) and isinstance(values[0], str):
                wday = True
            else:
                wday = False
            if wday:
                days = self.data['time'].apply(lambda x: x.weekday())
            else:
                days = self.data['time'].apply(lambda x: x.day)
            keep_col = day_match(days, values)

        elif col == 'hour' and self.time_col == 'time':
            keep_col = hour_match(
                self.data['time'].apply(lambda x: x.hour), values)

        elif col == 'time' and self.time_col == 'time':
            keep_col = datetime_match(self.data[col], values)

        elif col == 'level':
            # `level` combined with `variable` is handled above
            if 'variable' not in filters.keys():
                keep_col = find_depth(self.data['variable'], level=values)
            else:
                continue

        elif col in self.data.columns:
            keep_col = pattern_match(self.data[col], values, regexp=regexp)

        else:
            _raise_filter_error(col)

        keep &= keep_col

    return keep
Determine rows to keep in data for given set of filters
18,074
def col_apply(self, col, func, *args, **kwargs):
    """Apply `func` to column `col` of `data`, or of `meta` as fallback.

    Parameters
    ----------
    col : str
        column name; looked up in `data` first, then `meta`
    func : callable
        function applied element-wise to the column
    args, kwargs
        passed through to `Series.apply`
    """
    target = self.data if col in self.data else self.meta
    target[col] = target[col].apply(func, *args, **kwargs)
Apply a function to a column
18,075
def _to_file_format(self, iamc_index):
    """Return a dataframe suitable for writing to a file.

    Resets the timeseries index and title-cases the column headers.
    """
    df = self.timeseries(iamc_index=iamc_index).reset_index()
    return df.rename(columns={c: str(c).title() for c in df.columns})
Return a dataframe suitable for writing to a file
18,076
def to_csv(self, path, iamc_index=False, **kwargs):
    """Write timeseries data to a csv file.

    Parameters
    ----------
    path : str
        file path
    iamc_index : bool, default False
        passed to `_to_file_format`; selects the index layout
    kwargs
        passed to `pd.DataFrame.to_csv`
    """
    self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs)
Write timeseries data to a csv file
18,077
def to_excel(self, excel_writer, sheet_name='data', iamc_index=False,
             **kwargs):
    """Write timeseries data to Excel format.

    Parameters
    ----------
    excel_writer : str or pd.ExcelWriter
        file path or an existing ExcelWriter; a writer created here is
        also closed here, a writer passed in is left open for the caller
    sheet_name : str, default 'data'
        name of the sheet to write to
    iamc_index : bool, default False
        passed to `_to_file_format`; selects the index layout
    kwargs
        passed to `pd.DataFrame.to_excel`
    """
    # fix: `close` was unbound (NameError) whenever an ExcelWriter
    # instance was passed in; only close writers created in this call
    close = False
    if not isinstance(excel_writer, pd.ExcelWriter):
        close = True
        excel_writer = pd.ExcelWriter(excel_writer)
    self._to_file_format(iamc_index) \
        .to_excel(excel_writer, sheet_name=sheet_name, index=False, **kwargs)
    if close:
        excel_writer.close()
Write timeseries data to Excel format
18,078
def export_metadata(self, path):
    """Export the `meta` table to an Excel file at `path`."""
    writer = pd.ExcelWriter(path)
    write_sheet(writer, 'meta', self.meta, index=True)
    writer.save()
Export metadata to Excel
18,079
def load_metadata(self, path, *args, **kwargs):
    """Load metadata exported from a `pyam.IamDataFrame` instance.

    Parameters
    ----------
    path : str
        path to a csv or Excel metadata file; must contain the columns
        `model`, `scenario` and `exclude`
    args, kwargs
        passed to `pd.read_csv` / `pd.read_excel`

    Raises
    ------
    ValueError
        if the file does not exist, lacks required columns, or contains
        no scenarios matching this instance
    """
    if not os.path.exists(path):
        raise ValueError("no metadata file '" + path + "' found!")

    # load from file, defaulting to the 'meta' sheet for multi-sheet Excel
    if path.endswith('csv'):
        df = pd.read_csv(path, *args, **kwargs)
    else:
        xl = pd.ExcelFile(path)
        if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs:
            kwargs['sheet_name'] = 'meta'
        df = pd.read_excel(path, *args, **kwargs)

    req_cols = ['model', 'scenario', 'exclude']
    if not set(req_cols).issubset(set(df.columns)):
        e = 'File `{}` does not have required columns ({})!'
        raise ValueError(e.format(path, req_cols))

    # set index and restrict to scenarios present in this instance
    df.set_index(META_IDX, inplace=True)
    idx = self.meta.index.intersection(df.index)

    n_invalid = len(df) - len(idx)
    if n_invalid > 0:
        msg = 'Ignoring {} scenario{} from imported metadata'
        logger().info(msg.format(n_invalid, 's' if n_invalid > 1 else ''))

    if idx.empty:
        raise ValueError('No valid scenarios in imported metadata file!')

    df = df.loc[idx]

    msg = 'Importing metadata for {} scenario{} (for total of {})'
    logger().info(msg.format(len(df), 's' if len(df) > 1 else '',
                             len(self.meta)))

    # merge: imported values take precedence over existing meta values
    for col in df.columns:
        self._new_meta_column(col)
        self.meta[col] = df[col].combine_first(self.meta[col])
    # ensure the exclude flag stays boolean
    self.meta.exclude = self.meta.exclude.astype('bool')
Load metadata exported from pyam . IamDataFrame instance
18,080
def line_plot(self, x='year', y='value', **kwargs):
    """Plot timeseries lines of existing data.

    Parameters
    ----------
    x, y : str, default 'year' / 'value'
        columns (or variable names) to plot on the x- and y-axis
    kwargs
        meta columns to join (via `as_pandas`) and arguments passed on
        to `plotting.line_plot`
    """
    df = self.as_pandas(with_metadata=kwargs)

    # pivot data if an axis refers to an explicit variable name
    variables = df['variable'].unique()
    if x in variables or y in variables:
        keep_vars = set([x, y]) & set(variables)
        df = df[df['variable'].isin(keep_vars)]
        idx = list(set(df.columns) - set(['value']))
        df = (df
              .reset_index()
              .set_index(idx)
              .value  # dataframe -> series
              .unstack(level='variable')  # keep_vars become columns
              .rename_axis(None, axis=1)  # remove column index name
              .reset_index()
              .set_index(META_IDX))
        if x != 'year' and y != 'year':
            # drop the year column when neither axis uses it
            df = df.drop('year', axis=1)

    ax, handles, labels = plotting.line_plot(
        df.dropna(), x=x, y=y, **kwargs)
    return ax
Plot timeseries lines of existing data
18,081
def stack_plot(self, *args, **kwargs):
    """Plot timeseries stacks of existing data.

    Delegates to `plotting.stack_plot` with the full data (all meta
    columns joined); returns the matplotlib axes.
    """
    frame = self.as_pandas(with_metadata=True)
    return plotting.stack_plot(frame, *args, **kwargs)
Plot timeseries stacks of existing data
18,082
def scatter(self, x, y, **kwargs):
    """Plot a scatter chart using metadata columns or variables.

    Parameters
    ----------
    x, y : str
        each either a column of `meta` or a variable name in `data`
    kwargs
        meta columns to join and arguments passed to `plotting.scatter`
    """
    variables = self.data['variable'].unique()
    xisvar = x in variables
    yisvar = y in variables
    if not xisvar and not yisvar:
        # both axes are meta columns
        cols = [x, y] + self._discover_meta_cols(**kwargs)
        df = self.meta[cols].reset_index()
    elif xisvar and yisvar:
        # both axes are variables: join the two filtered datasets
        dfx = (self
               .filter(variable=x)
               .as_pandas(with_metadata=kwargs)
               .rename(columns={'value': x, 'unit': 'xunit'})
               .set_index(YEAR_IDX)
               .drop('variable', axis=1))
        dfy = (self
               .filter(variable=y)
               .as_pandas(with_metadata=kwargs)
               .rename(columns={'value': y, 'unit': 'yunit'})
               .set_index(YEAR_IDX)
               .drop('variable', axis=1))
        df = dfx.join(dfy, lsuffix='_left', rsuffix='').reset_index()
    else:
        # one axis is a variable, the other a meta column
        var = x if xisvar else y
        df = (self
              .filter(variable=var)
              .as_pandas(with_metadata=kwargs)
              .rename(columns={'value': var}))
    ax = plotting.scatter(df.dropna(), x, y, **kwargs)
    return ax
Plot a scatter chart using metadata columns
18,083
def update(self, rc):
    """Add additional run control parameters.

    `rc` is loaded via `_load_yaml` and merged recursively into the
    current store.
    """
    loaded = self._load_yaml(rc)
    self.store = _recursive_update(self.store, loaded)
Add additional run control parameters
18,084
def recursive_update(self, k, d):
    """Recursively update the top-level option `k` with dictionary `d`."""
    self.store[k] = _recursive_update(self[k], d)
Recursively update a top - level option in the run control
18,085
def available_metadata(self):
    """List all scenario metadata indicators available in the connected
    data source (as a pd.Series of indicator names)."""
    url = self.base_url + 'metadata/types'
    headers = {'Authorization': 'Bearer {}'.format(self.auth())}
    r = requests.get(url, headers=headers)
    return pd.read_json(r.content, orient='records')['name']
List all scenario metadata indicators available in the connected data source
18,086
def metadata(self, default=True):
    """Metadata of scenarios in the connected data source.

    Parameters
    ----------
    default : bool, default True
        if True, only query default runs
    """
    # the API expects lowercase string booleans in the query string
    default = 'true' if default else 'false'
    add_url = 'runs?getOnlyDefaultRuns={}&includeMetadata=true'
    url = self.base_url + add_url.format(default)
    headers = {'Authorization': 'Bearer {}'.format(self.auth())}
    r = requests.get(url, headers=headers)
    df = pd.read_json(r.content, orient='records')

    def extract(row):
        # expand the nested `metadata` dict into one row per scenario
        return (
            pd.concat([row[['model', 'scenario']],
                       pd.Series(row.metadata)])
            .to_frame()
            .T
            .set_index(['model', 'scenario'])
        )

    return pd.concat([extract(row) for idx, row in df.iterrows()],
                     sort=False).reset_index()
Metadata of scenarios in the connected data source
18,087
def variables(self):
    """All variables in the connected data source (as a pd.Series)."""
    url = self.base_url + 'ts'
    headers = {'Authorization': 'Bearer {}'.format(self.auth())}
    r = requests.get(url, headers=headers)
    df = pd.read_json(r.content, orient='records')
    return pd.Series(df['variable'].unique(), name='variable')
All variables in the connected data source
18,088
def query(self, **kwargs):
    """Query the data source, subselecting data.

    Parameters
    ----------
    kwargs
        passed to `_query_post_data` to build the request filters

    Raises
    ------
    ValueError
        if any scenario resolves to multiple run versions
    """
    headers = {
        'Authorization': 'Bearer {}'.format(self.auth()),
        'Content-Type': 'application/json',
    }
    data = json.dumps(self._query_post_data(**kwargs))
    url = self.base_url + 'runs/bulk/ts'
    r = requests.post(url, headers=headers, data=data)
    df = (pd.read_json(r.content, orient='records')
          .drop(columns='runId')
          .rename(columns={'time': 'subannual'}))
    # drop `subannual` column if there is no subannual data
    if pd.Series([i in [-1, 'year'] for i in df.subannual]).all():
        df.drop(columns='subannual', inplace=True)
    # each scenario must resolve to exactly one run version
    lst = (df[META_IDX + ['version']]
           .drop_duplicates()
           .groupby(META_IDX)
           .count()
           .version)
    if max(lst) > 1:
        raise ValueError('multiple versions for {}'.format(
            lst[lst > 1].index.to_list()))
    df.drop(columns='version', inplace=True)

    return df
Query the data source, subselecting data. Filter criteria can be passed as keyword arguments.
18,089
def reindex(self, copy=True):
    """Reindex the summary statistics dataframe against the declared
    index and header levels.

    Parameters
    ----------
    copy : bool, default True
        if True, return a reindexed deep copy; else reindex in place
    """
    ret = deepcopy(self) if copy else self
    ret.stats = ret.stats.reindex(index=ret._idx, level=0)
    if ret.idx_depth == 2:
        ret.stats = ret.stats.reindex(index=ret._sub_idx, level=1)
    if ret.rows is not None:
        ret.stats = ret.stats.reindex(index=ret.rows, level=ret.idx_depth)
    # reorder the three column levels: headers, subheaders, describe-stats
    ret.stats = ret.stats.reindex(columns=ret._headers, level=0)
    ret.stats = ret.stats.reindex(columns=ret._subheaders, level=1)
    ret.stats = ret.stats.reindex(columns=ret._describe_cols, level=2)
    if copy:
        return ret
Reindex the summary statistics dataframe
18,090
def summarize(self, center='mean', fullrange=None, interquartile=None,
              custom_format='{:.2f}'):
    """Format the compiled statistics to a concise string output.

    Parameters
    ----------
    center : str, default 'mean'
        statistic shown as center ('50%' is mapped to 'median')
    fullrange : bool, default None
        show the full range; defaults to True if `interquartile` is not set
    interquartile : bool, default None
        show the interquartile range instead
    custom_format : str, default '{:.2f}'
        format string for the numbers
    """
    # reorder the statistics dataframe in place before formatting
    self.reindex(copy=False)

    center = 'median' if center == '50%' else center
    if fullrange is None and interquartile is None:
        fullrange = True
    return self.stats.apply(format_rows, center=center,
                            fullrange=fullrange,
                            interquartile=interquartile,
                            custom_format=custom_format,
                            axis=1, raw=False)
Format the compiled statistics to a concise string output
18,091
def reset_default_props(**kwargs):
    """Reset the global style-property cycles to their initial point.

    Parameters
    ----------
    kwargs
        if given, passed to `_get_standard_colors` to build the color
        cycle; otherwise the matplotlib `axes.prop_cycle` colors are used
    """
    global _DEFAULT_PROPS
    pcycle = plt.rcParams['axes.prop_cycle']
    _DEFAULT_PROPS = {
        'color': itertools.cycle(_get_standard_colors(**kwargs))
        if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]),
        'marker': itertools.cycle(['o', 'x', '.', '+', '*']),
        'linestyle': itertools.cycle(['-', '--', '-.', ':']),
    }
Reset properties to initial cycle point
18,092
def default_props(reset=False, **kwargs):
    """Return the current default style properties.

    Initializes the global property cycles on first use or when
    `reset=True`; `kwargs` are passed to `reset_default_props`.
    """
    global _DEFAULT_PROPS
    if reset or _DEFAULT_PROPS is None:
        reset_default_props(**kwargs)
    return _DEFAULT_PROPS
Return current default properties
18,093
def assign_style_props(df, color=None, marker=None, linestyle=None,
                       cmap=None):
    """Assign the style properties for a plot.

    Parameters
    ----------
    df : pd.DataFrame
        data to be used for plotting
    color, marker, linestyle : str, default None
        column names in `df` keying the respective style cycle
    cmap : str, default None
        colormap name; only valid together with `color`

    Raises
    ------
    ValueError
        if `cmap` is given without `color`
    """
    if color is None and cmap is not None:
        raise ValueError('`cmap` must be provided with the `color` argument')

    # determine the number of colors needed from the color column (or,
    # as fallback, distinct IAMC index combinations)
    n = len(df[color].unique()) if color in df.columns else \
        len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())
    defaults = default_props(reset=True, num_colors=n, colormap=cmap)

    props = {}
    rc = run_control()

    kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)]

    for kind, var in kinds:
        rc_has_kind = kind in rc
        if var in df.columns:
            rc_has_var = rc_has_kind and var in rc[kind]
            props_for_kind = {}

            for val in df[var].unique():
                if rc_has_var and val in rc[kind][var]:
                    props_for_kind[val] = rc[kind][var][val]
                    # cycle anyway so the next value gets a fresh property
                    next(defaults[kind])
                else:
                    props_for_kind[val] = next(defaults[kind])
            props[kind] = props_for_kind

    # replace named pyam colors with their values from PYAM_COLORS
    if 'color' in props:
        d = props['color']
        values = list(d.values())
        overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))
        if overlap_idx.any():
            keys = np.array(list(d.keys()))[overlap_idx]
            values = np.array(values)[overlap_idx]
            for k, v in zip(keys, values):
                d[k] = PYAM_COLORS[v]
            props['color'] = d
    return props
Assign the style properties for a plot
18,094
def reshape_line_plot(df, x, y):
    """Reshape data from long form to "line plot form".

    The result has the `x` value as index with one column per line.
    Duplicate index entries are dropped (keeping the last), with a
    warning.
    """
    index = list(df.columns.drop(y))
    if df.duplicated(index).any():
        warnings.warn('Duplicated index found.')
        df = df.drop_duplicates(index, keep='last')
    return df.set_index(index)[y].unstack(x).T
Reshape data from long form to line plot form .
18,095
def reshape_bar_plot(df, x, y, bars):
    """Reshape data from long form to "bar plot form".

    The result has the `x` value as index with one column per bar.
    Duplicate (bars, x) rows are dropped (keeping the last), with a
    warning.
    """
    index = [bars, x]
    if df.duplicated(index).any():
        warnings.warn('Duplicated index found.')
        df = df.drop_duplicates(index, keep='last')
    return df.set_index(index)[y].unstack(x).T
Reshape data from long form to bar plot form .
18,096
def read_shapefile(fname, region_col=None, **kwargs):
    """Read a shapefile for use in regional plots.

    Shapefiles must have a column denoted as `region`; region names are
    upper-cased.

    Parameters
    ----------
    fname : str
        path to the shapefile
    region_col : str, default None
        name of an existing column to rename to `region`
    kwargs
        passed to `gpd.read_file`

    Raises
    ------
    IOError
        if no `region` column is present after renaming
    """
    gdf = gpd.read_file(fname, **kwargs)
    if region_col is not None:
        gdf = gdf.rename(columns={region_col: 'region'})
    if 'region' not in gdf.columns:
        raise IOError('Must provide a region column')
    gdf['region'] = gdf['region'].str.upper()
    return gdf
Read a shapefile for use in regional plots . Shapefiles must have a column denoted as region .
18,097
def add_net_values_to_bar_plot(axs, color='k'):
    """Add net values next to an existing vertical stacked bar chart.

    Parameters
    ----------
    axs : matplotlib.Axes or iterable thereof
        the axes holding the stacked bar chart(s)
    color : str, default 'k'
        color of the rectangles drawn for the net values
    """
    axs = axs if isinstance(axs, Iterable) else [axs]
    for ax in axs:
        box_args = _get_boxes(ax)
        for x, args in box_args.items():
            rect = mpatches.Rectangle(*args, color=color)
            ax.add_patch(rect)
Add net values next to an existing vertical stacked bar chart
18,098
def scatter(df, x, y, ax=None, legend=None, title=None,
            color=None, marker='o', linestyle=None, cmap=None,
            groupby=['model', 'scenario'], with_lines=False, **kwargs):
    """Plot data as a scatter chart.

    Parameters
    ----------
    df : pd.DataFrame
        data to plot (long form)
    x, y : str
        columns of `df` for the x- and y-axis
    ax : matplotlib.Axes, optional
    legend : bool, optional
        defaults to showing a legend when there are fewer than 13 entries
    title : str, optional
    color, marker, linestyle : optional
        column names (keying the style cycles) or fixed values
    cmap : str, optional
        colormap name, passed to `assign_style_props`
    groupby : list of str, default ['model', 'scenario']
        columns to group the scatter points by
        (NOTE(review): mutable default argument; not mutated here, but
        callers should not rely on sharing it)
    with_lines : bool, default False
        if True, connect each group's points with lines
    kwargs
        passed to `ax.plot` / `ax.scatter`
    """
    if ax is None:
        fig, ax = plt.subplots()

    # assign style properties per group value
    props = assign_style_props(df, color=color, marker=marker,
                               linestyle=linestyle, cmap=cmap)

    groups = df.groupby(groupby)

    # plot data, collecting one legend entry per group
    legend_data = []
    for name, group in groups:
        pargs = {}
        labels = []
        for key, kind, var in [('c', 'color', color),
                               ('marker', 'marker', marker),
                               ('linestyle', 'linestyle', linestyle)]:
            if kind in props:
                label = group[var].values[0]
                pargs[key] = props[kind][group[var].values[0]]
                labels.append(repr(label).lstrip("u'").strip("'"))
            else:
                pargs[key] = var

        if len(labels) > 0:
            legend_data.append(' '.join(labels))
        else:
            legend_data.append(' '.join(name))
        kwargs.update(pargs)
        if with_lines:
            ax.plot(group[x], group[y], **kwargs)
        else:
            # scatter() does not accept a linestyle argument
            kwargs.pop('linestyle')
            ax.scatter(group[x], group[y], **kwargs)

    # deduplicate legend handles/labels
    handles, labels = ax.get_legend_handles_labels()
    if legend_data != [''] * len(legend_data):
        labels = sorted(list(set(tuple(legend_data))))
        idxs = [legend_data.index(d) for d in labels]
        handles = [handles[i] for i in idxs]
    if legend is None and len(labels) < 13 or legend is not False:
        _add_legend(ax, handles, labels, legend)

    ax.set_xlabel(x)
    ax.set_ylabel(y)
    if title:
        ax.set_title(title)

    return ax
Plot data as a scatter chart .
18,099
def logger():
    """Return the global logger, initializing it on first access."""
    global _LOGGER
    if _LOGGER is not None:
        return _LOGGER
    logging.basicConfig()
    _LOGGER = logging.getLogger()
    _LOGGER.setLevel('INFO')
    return _LOGGER
Access global logger