idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
12,800
def version(self):
    """Send the memcached ``version`` command and return the server version bytes.

    Raises MemcacheUnknownError when the response does not start with b'VERSION'.
    """
    cmd = b"version\r\n"
    results = self._misc_cmd([cmd], b'version', False)
    prefix, _, rest = results[0].partition(b' ')
    if prefix != b'VERSION':
        raise MemcacheUnknownError(
            "Received unexpected response: %s" % results[0])
    return rest
The memcached version command .
12,801
def flush_all(self, delay=0, noreply=None):
    """Send the memcached ``flush_all`` command.

    Returns True when noreply is in effect, otherwise whether the server
    answered b'OK'.
    """
    if noreply is None:
        noreply = self.default_noreply
    cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'flush_all', noreply)
    return True if noreply else results[0] == b'OK'
The memcached flush_all command .
12,802
def quit(self):
    """Send the memcached ``quit`` command and close the connection."""
    self._misc_cmd([b"quit\r\n"], b'quit', True)
    self.close()
The memcached quit command .
12,803
def _create_list(value, allow_filename=False):
    """Create a list from *value*.

    Lists pass through unchanged; strings are either read line-by-line from a
    file of that name (when *allow_filename* and the file exists) or split on
    commas. Anything else raises ValueError.
    """
    if isinstance(value, list):
        return value
    if isinstance(value, string_type):
        if allow_filename and os.path.isfile(value):
            with codecs.open(value, 'r', encoding="utf-8") as handle:
                return handle.read().splitlines()
        return value.split(',')
    raise ValueError("Can't create list for input {}".format(value))
Create a list from the input value .
12,804
def is_compatible_assembly_level(self, ncbi_assembly_level):
    """Return True if *ncbi_assembly_level* matches a configured assembly level."""
    wanted = [self._LEVELS[level] for level in self.assembly_level]
    return ncbi_assembly_level in wanted
Check if a given ncbi assembly level string matches the configured assembly levels .
12,805
def from_kwargs(cls, **kwargs):
    """Initialise a configuration object from keyword arguments.

    Slot names with a leading underscore are exposed without it; unknown
    keywords raise ValueError.
    """
    config = cls()
    for slot in cls.__slots__:
        if slot.startswith('_'):
            slot = slot[1:]
        value = kwargs.pop(slot, cls.get_default(slot))
        setattr(config, slot, value)
    if kwargs:
        raise ValueError("Unrecognized option(s): {}".format(kwargs.keys()))
    return config
Initialise configuration from kwargs .
12,806
def from_namespace(cls, namespace):
    """Initialise configuration from an argparse Namespace object.

    Only attributes present on *namespace* are copied; slot names with a
    leading underscore are exposed without it.
    """
    config = cls()
    for slot in cls.__slots__:
        if slot.startswith('_'):
            slot = slot[1:]
        if hasattr(namespace, slot):
            setattr(config, slot, getattr(namespace, slot))
    return config
Initialise from argparser Namespace object .
12,807
def get_default(cls, category):
    """Return the default value for *category*.

    List-valued defaults represent choices; the first entry is the default.
    """
    value = cls._DEFAULTS[category]
    if isinstance(value, list) and value:
        return value[0]
    return value
Get the default value of a given category .
12,808
def get_choices(cls, category):
    """Return all available options for *category*.

    Raises ValueError when the category has a single fixed value instead of
    a list of choices.
    """
    value = cls._DEFAULTS[category]
    if not isinstance(value, list):
        raise ValueError("{} does not offer choices".format(category))
    return value
Get all available options for a category .
12,809
def main():
    """Build and parse the command line, then run the download with retries."""
    parser = argument_parser(version=__version__)
    args = parser.parse_args()
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    max_retries = args.retries
    attempts = 0
    ret = args_download(args)
    # Exit code 75 (EX_TEMPFAIL) signals a connection error worth retrying.
    while ret == 75 and attempts < max_retries:
        attempts += 1
        logging.error('Downloading from NCBI failed due to a connection error, retrying. Retries so far: %s', attempts)
        ret = args_download(args)
    return ret
Build and parse command line .
12,810
def get(columns=None):
    """Return the MetaData singleton, creating it on first use.

    *columns* defaults to _DEFAULT_COLUMNS and is only used when the
    singleton does not exist yet.
    """
    global _METADATA
    if columns is None:
        columns = _DEFAULT_COLUMNS
    if not _METADATA:
        _METADATA = MetaData(columns)
    return _METADATA
Get or create MetaData singleton .
12,811
def add(self, entry, local_file):
    """Add a metadata row built from *entry*, recording the local file path."""
    row = self.rowClass()
    # Copy over only the keys that are actual metadata columns.
    for key, val in entry.items():
        if key in self.columns:
            setattr(row, key, val)
    row.local_filename = os.path.join('.', os.path.relpath(local_file))
    self.rows.append(row)
Add a metadata row .
12,812
def write(self, handle):
    """Write the tab-separated header and all metadata rows to *handle*."""
    header = u"\t".join(self.columns)
    handle.write(header)
    handle.write(u"\n")
    for entry in self.rows:
        entry.write(handle)
Write metadata to handle .
12,813
def config_download(config):
    """Run the actual download from NCBI with parameters in a config object.

    Returns 0 on success, 1 on filter mismatch or user interrupt, and
    75 (EX_TEMPFAIL) on connection errors so the caller can retry.
    """
    try:
        download_candidates = select_candidates(config)
        if len(download_candidates) < 1:
            logging.error("No downloads matched your filter. Please check your options.")
            return 1
        if config.dry_run:
            # Dry run: list what would be downloaded, then stop.
            print("Considering the following {} assemblies for download:".format(len(download_candidates)))
            for entry, _ in download_candidates:
                print(entry['assembly_accession'], entry['organism_name'], sep="\t")
            return 0
        download_jobs = []
        for entry, group in download_candidates:
            download_jobs.extend(create_downloadjob(entry, group, config))
        if config.parallel == 1:
            for dl_job in download_jobs:
                worker(dl_job)
        else:
            # Fan jobs out over a worker pool; the large timeout passed to
            # get() keeps the main process responsive to Ctrl-C.
            pool = Pool(processes=config.parallel)
            jobs = pool.map_async(worker, download_jobs)
            try:
                jobs.get(0xFFFF)
            except KeyboardInterrupt:
                logging.error("Interrupted by user")
                return 1
        if config.metadata_table:
            with codecs.open(config.metadata_table, mode='w', encoding='utf-8') as handle:
                table = metadata.get()
                table.write(handle)
    except requests.exceptions.ConnectionError as err:
        logging.error('Download from NCBI failed: %r', err)
        # 75 == EX_TEMPFAIL: tells the caller this failure is retryable.
        return 75
    return 0
Run the actual download from NCBI with parameters in a config object .
12,814
def select_candidates(config):
    """Collect (entry, group) candidate pairs across all configured groups."""
    download_candidates = []
    for group in config.group:
        summary_file = get_summary(config.section, group, config.uri, config.use_cache)
        for entry in filter_entries(parse_summary(summary_file), config):
            download_candidates.append((entry, group))
    return download_candidates
Select candidates to download .
12,815
def filter_entries(entries, config):
    """Narrow down which entries to download.

    Applies, in order: type-material, genus, species TaxID, TaxID, assembly
    accession, assembly level and refseq category filters, and returns the
    entries that pass all of them.
    """
    def in_genus_list(species, genus_list):
        """Return True if *species* starts with any genus in *genus_list*."""
        for genus in genus_list:
            if species.startswith(genus.capitalize()):
                return True
        return False

    new_entries = []
    for entry in entries:
        if config.type_material and config.type_material != ['any']:
            # Translate user-facing relation names to NCBI's strings.
            # (Was a one-shot map() generator; a list is reusable and clearer.)
            requested_types = [config._RELATION_TO_TYPE_MATERIAL[x]
                               for x in config.type_material]
            if not entry['relation_to_type_material'] or \
                    entry['relation_to_type_material'] not in requested_types:
                logging.debug("Skipping assembly with no reference to type material or reference to type material does not match requested")
                continue
            # BUG FIX: a leftover debug print() of every matching entry's
            # relation_to_type_material was removed here; it polluted stdout
            # during normal operation.
        if config.genus and not in_genus_list(entry['organism_name'], config.genus):
            logging.debug('Organism name %r does not start with any in %r, skipping',
                          entry['organism_name'], config.genus)
            continue
        if config.species_taxid and entry['species_taxid'] not in config.species_taxid:
            logging.debug('Species TaxID %r does not match with any in %r, skipping',
                          entry['species_taxid'], config.species_taxid)
            continue
        if config.taxid and entry['taxid'] not in config.taxid:
            logging.debug('Organism TaxID %r does not match with any in %r, skipping',
                          entry['taxid'], config.taxid)
            continue
        if not config.is_compatible_assembly_accession(entry['assembly_accession']):
            logging.debug('Skipping entry with incompatible assembly accession %r',
                          entry['assembly_accession'])
            continue
        if not config.is_compatible_assembly_level(entry['assembly_level']):
            logging.debug('Skipping entry with assembly level %r', entry['assembly_level'])
            continue
        if config.refseq_category != 'all' \
                and entry['refseq_category'] != config.get_refseq_category_string(config.refseq_category):
            logging.debug('Skipping entry with refseq_category %r, not %r',
                          entry['refseq_category'], config.refseq_category)
            continue
        new_entries.append(entry)
    return new_entries
Narrow down which entries to download .
12,816
def worker(job):
    """Run a single download job; return True on success.

    Jobs with full_url None are symlink-only; KeyboardInterrupt is ignored
    so pool workers shut down quietly.
    """
    ret = False
    try:
        if job.full_url is not None:
            req = requests.get(job.full_url, stream=True)
            ret = save_and_check(req, job.local_file, job.expected_checksum)
            if not ret:
                return ret
        ret = create_symlink(job.local_file, job.symlink_path)
    except KeyboardInterrupt:
        logging.debug("Ignoring keyboard interrupt.")
    return ret
Run a single download job .
12,817
def get_summary(section, domain, uri, use_cache):
    """Get the assembly_summary.txt file from NCBI and return a StringIO for it.

    When *use_cache* is set, a cached copy younger than one day is used;
    freshly downloaded summaries are written back to the cache.
    """
    logging.debug('Checking for a cached summary file')
    cachefile = "{section}_{domain}_assembly_summary.txt".format(section=section, domain=domain)
    full_cachefile = os.path.join(CACHE_DIR, cachefile)
    # Cache hit requires the file to exist and be younger than one day.
    if use_cache and os.path.exists(full_cachefile) \
            and datetime.utcnow() - datetime.fromtimestamp(os.path.getmtime(full_cachefile)) < timedelta(days=1):
        logging.info('Using cached summary.')
        with codecs.open(full_cachefile, 'r', encoding='utf-8') as fh:
            return StringIO(fh.read())
    logging.debug('Downloading summary for %r/%r uri: %r', section, domain, uri)
    url = '{uri}/{section}/{domain}/assembly_summary.txt'.format(section=section, domain=domain, uri=uri)
    req = requests.get(url)
    if use_cache:
        try:
            os.makedirs(CACHE_DIR)
        except OSError as err:
            # errno 17 == EEXIST: the cache directory already exists.
            if err.errno != 17:
                raise
        with codecs.open(full_cachefile, 'w', encoding='utf-8') as fh:
            fh.write(req.text)
    return StringIO(req.text)
Get the assembly_summary . txt file from NCBI and return a StringIO object for it .
12,818
def create_downloadjob(entry, domain, config):
    """Create download jobs for all file formats from a summary file entry.

    Returns a list of DownloadJob objects: one per configured file format
    that needs downloading or re-symlinking.
    """
    logging.info('Checking record %r', entry['assembly_accession'])
    full_output_dir = create_dir(entry, config.section, domain, config.output)
    symlink_path = None
    if config.human_readable:
        symlink_path = create_readable_dir(entry, config.section, domain, config.output)
    checksums = grab_checksums_file(entry)
    # Keep a copy of the checksum file alongside the downloads.
    with open(os.path.join(full_output_dir, 'MD5SUMS'), 'w') as handle:
        handle.write(checksums)
    parsed_checksums = parse_checksums(checksums)
    download_jobs = []
    for fmt in config.file_format:
        try:
            if has_file_changed(full_output_dir, parsed_checksums, fmt):
                download_jobs.append(
                    download_file_job(entry, full_output_dir, parsed_checksums, fmt, symlink_path))
            elif need_to_create_symlink(full_output_dir, parsed_checksums, fmt, symlink_path):
                download_jobs.append(
                    create_symlink_job(full_output_dir, parsed_checksums, fmt, symlink_path))
        except ValueError as err:
            # No matching file for this format; log and continue with the rest.
            logging.error(err)
    return download_jobs
Create download jobs for all file formats from a summary file entry .
12,819
def create_dir(entry, section, domain, output):
    """Create (if needed) and return the output directory for *entry*."""
    full_output_dir = os.path.join(output, section, domain, entry['assembly_accession'])
    try:
        os.makedirs(full_output_dir)
    except OSError as err:
        # A pre-existing directory is fine; anything else is a real error.
        if not (err.errno == errno.EEXIST and os.path.isdir(full_output_dir)):
            raise
    return full_output_dir
Create the output directory for the entry if needed .
12,820
def create_readable_dir(entry, section, domain, output):
    """Create (if needed) and return the human-readable directory for *entry*."""
    if domain != 'viral':
        full_output_dir = os.path.join(output, 'human_readable', section, domain,
                                       get_genus_label(entry), get_species_label(entry),
                                       get_strain_label(entry))
    else:
        # Viral assemblies have no genus/species split; use the organism name.
        full_output_dir = os.path.join(output, 'human_readable', section, domain,
                                       entry['organism_name'].replace(' ', '_'),
                                       get_strain_label(entry, viral=True))
    try:
        os.makedirs(full_output_dir)
    except OSError as err:
        if not (err.errno == errno.EEXIST and os.path.isdir(full_output_dir)):
            raise
    return full_output_dir
Create a human - readable directory to link the entry to if needed .
12,821
def grab_checksums_file(entry):
    """Fetch the md5checksums.txt for *entry* and return its text."""
    http_url = convert_ftp_url(entry['ftp_path'])
    full_url = '{}/md5checksums.txt'.format(http_url)
    return requests.get(full_url).text
Grab the checksum file for a given entry .
12,822
def parse_checksums(checksums_string):
    """Parse an MD5SUMS-style string into a list of {'checksum', 'file'} dicts.

    Empty lines are skipped silently; malformed lines are logged and skipped.
    """
    parsed = []
    for line in checksums_string.split('\n'):
        if line == '':
            continue
        try:
            checksum, filename = line.split()
        except ValueError:
            logging.debug('Skipping over unexpected checksum line %r', line)
            continue
        # Normalise './name' to 'name'.
        if filename.startswith('./'):
            filename = filename[2:]
        parsed.append({'checksum': checksum, 'file': filename})
    return parsed
Parse a file containing checksums and filenames .
12,823
def has_file_changed(directory, checksums, filetype='genbank'):
    """Return True when the local file for *filetype* is missing or its MD5 differs."""
    pattern = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, pattern)
    full_filename = os.path.join(directory, filename)
    # A missing file always counts as changed.
    if not os.path.isfile(full_filename):
        return True
    return md5sum(full_filename) != expected_checksum
Check if the checksum of a given file has changed .
12,824
def need_to_create_symlink(directory, checksums, filetype, symlink_path):
    """Return True when a human-readable symlink is wanted but absent or stale."""
    if symlink_path is None:
        return False
    pattern = NgdConfig.get_fileending(filetype)
    filename, _ = get_name_and_checksum(checksums, pattern)
    full_filename = os.path.join(directory, filename)
    symlink_name = os.path.join(symlink_path, filename)
    # An existing link that already points at the file needs no work.
    if os.path.islink(symlink_name) and os.readlink(symlink_name) == full_filename:
        return False
    return True
Check if we need to create a symlink for an existing file .
12,825
def get_name_and_checksum(checksums, end):
    """Return (filename, checksum) for the first entry whose name ends in *end*.

    Derived '_from_' files are skipped unless *end* itself contains '_from_'.
    Raises ValueError when nothing matches.
    """
    for item in checksums:
        fname = item['file']
        if not fname.endswith(end):
            continue
        # Skip derived "_from_" files unless explicitly requested.
        if '_from_' not in end and '_from_' in fname:
            continue
        return fname, item['checksum']
    raise ValueError('No entry for file ending in {!r}'.format(end))
Extract a full filename and checksum from the checksums list for a file ending in given end .
12,826
def md5sum(filename):
    """Return the hex MD5 digest of *filename*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
Calculate the md5sum of a file and return the hexdigest .
12,827
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
    """Generate a DownloadJob that actually triggers a file download."""
    pattern = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, pattern)
    base_url = convert_ftp_url(entry['ftp_path'])
    full_url = '{}/{}'.format(base_url, filename)
    local_file = os.path.join(directory, filename)
    full_symlink = os.path.join(symlink_path, filename) if symlink_path is not None else None
    # Record this download in the metadata table.
    metadata.get().add(entry, local_file)
    return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
Generate a DownloadJob that actually triggers a file download .
12,828
def create_symlink_job(directory, checksums, filetype, symlink_path):
    """Create a symlink-only DownloadJob for an already-downloaded file."""
    pattern = NgdConfig.get_fileending(filetype)
    filename, _ = get_name_and_checksum(checksums, pattern)
    return DownloadJob(None, os.path.join(directory, filename), None,
                       os.path.join(symlink_path, filename))
Create a symlink - creating DownloadJob for an already downloaded file .
12,829
def save_and_check(response, local_file, expected_checksum):
    """Stream an HTTP response body to *local_file* and verify its MD5 checksum."""
    with open(local_file, 'wb') as handle:
        for chunk in response.iter_content(4096):
            handle.write(chunk)
    actual_checksum = md5sum(local_file)
    if actual_checksum == expected_checksum:
        return True
    logging.error('Checksum mismatch for %r. Expected %r, got %r',
                  local_file, expected_checksum, actual_checksum)
    return False
Save the content of an http response and verify the checksum matches .
12,830
def create_symlink(local_file, symlink_path):
    """Create a relative symbolic link to *local_file* if *symlink_path* is given.

    Always returns True.
    """
    if symlink_path is not None:
        # Replace any pre-existing (possibly dangling) link at the target path.
        if os.path.exists(symlink_path) or os.path.lexists(symlink_path):
            os.unlink(symlink_path)
        local_file = os.path.normpath(local_file)
        symlink_path = os.path.normpath(symlink_path)
        # Build a relative target: climb out of the symlink's directory,
        # then descend back to the local file.
        levels_up = len(os.path.dirname(symlink_path).split(os.sep))
        prefix = levels_up * (os.pardir + os.sep)
        os.symlink(os.path.join(prefix, local_file), symlink_path)
    return True
Create a relative symbolic link if symlink path is given .
12,831
def get_strain_label(entry, viral=False):
    """Try to extract a strain label from an assembly summary *entry*.

    Falls back, in order, to: infraspecific name, isolate, trailing words of
    the organism name (non-viral only), then the assembly accession. The
    result is sanitised for use in file and directory names.
    """
    def raw_strain(entry):
        strain = entry['infraspecific_name']
        if strain != '':
            # Values look like 'strain=XYZ'; keep only the part after '='.
            return strain.split('=')[-1]
        strain = entry['isolate']
        if strain != '':
            return strain
        if len(entry['organism_name'].split(' ')) > 2 and not viral:
            return ' '.join(entry['organism_name'].split(' ')[2:])
        return entry['assembly_accession']

    def cleanup(strain):
        # Replace separator characters that would break paths.
        strain = strain.strip()
        for char in (' ', ';', '/', '\\'):
            strain = strain.replace(char, '_')
        return strain

    return cleanup(raw_strain(entry))
Try to extract a strain from an assembly summary entry .
12,832
def pretty(d, indent=0):
    """Recursively print a nested dict: keys to stdout, leaf values to stderr."""
    pad = ' ' * indent
    for key in d:
        print(pad + str(key))
        value = d[key]
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            sys.stderr.write('{}{}\n'.format(' ' * (indent + 1), str(value)))
A prettier way to print nested dicts
12,833
def desc_taxa(taxid, ncbi, outFH, just_taxids=False):
    """Write descendant taxa of *taxid* to *outFH*.

    One taxid per line when *just_taxids*, otherwise tab-separated
    (parent taxid, descendant taxid, descendant name) rows.
    """
    descendants = ncbi.get_descendant_taxa(taxid)
    names = ncbi.translate_to_names(descendants)
    if just_taxids:
        for desc_id in descendants:
            outFH.write('{}\n'.format(desc_id))
        return
    for name, desc_id in zip(names, descendants):
        fields = [str(f) for f in (taxid, desc_id, name)]
        outFH.write('\t'.join(fields) + '\n')
Write descendent taxa for taxid
12,834
def taxon_info(taxid, ncbi, outFH):
    """Write name, taxid, rank and lineage for *taxid* as one TSV row."""
    taxid = int(taxid)
    tax_name = ncbi.get_taxid_translator([taxid])[taxid]
    rank = list(ncbi.get_rank([taxid]).values())[0]
    # Lineage as semicolon-joined 'taxid:name' pairs.
    names = ncbi.get_taxid_translator(ncbi.get_lineage(taxid))
    lineage = ';'.join('{}:{}'.format(k, v) for k, v in names.items())
    row = [str(x) for x in (tax_name, taxid, rank, lineage)]
    outFH.write('\t'.join(row) + '\n')
Write info on taxid
12,835
def name2taxid(taxids, ncbi):
    """Convert taxon names to taxids; numeric strings pass through as ints.

    Raises ValueError when an input is neither a known name nor a number.
    """
    converted = []
    for name in taxids:
        try:
            converted.append(ncbi.get_name_translator([name])[name][0])
        except KeyError:
            # Not a known name: maybe it is already a numeric taxid.
            try:
                converted.append(int(name))
            except ValueError:
                raise ValueError('Error: cannot convert to taxid: {}'.format(name))
    return converted
Converting taxon names to taxids
12,836
def main():
    """Make queries against NCBI Taxa databases.

    Parses arguments, optionally updates the local taxonomy database, then
    writes either per-taxon info or descendant listings to the chosen output.
    """
    args = get_args()
    ncbi = NCBITaxa(dbfile=args.database)
    if args.verbose > 1:
        sys.stderr.write('Taxa database is stored at {}\n'.format(ncbi.dbfile))
    if args.update is True:
        if args.verbose > 1:
            msg = 'Updating the taxonomy database. This may take several minutes...\n'
            sys.stderr.write(msg)
        ncbi.update_taxonomy_database()
    # Accept quoted, comma-separated taxids/names on the command line.
    args.taxid = args.taxid.replace('"', '').replace("'", '').split(',')
    args.taxid = name2taxid(args.taxid, ncbi)
    if args.outfile is None:
        outFH = sys.stdout
    else:
        outFH = open(args.outfile, 'w')
    try:
        if args.taxon_info:
            outFH.write('\t'.join(['name', 'taxid', 'rank', 'lineage']) + '\n')
        elif not args.just_taxids:
            outFH.write('\t'.join(['parent_taxid', 'descendent_taxid', 'descendent_name']) + '\n')
        for taxid in args.taxid:
            if args.taxon_info:
                taxon_info(taxid, ncbi, outFH)
            else:
                desc_taxa(taxid, ncbi, outFH, args.just_taxids)
    finally:
        # BUG FIX: the original unconditionally called outFH.close(), which
        # closed sys.stdout when no output file was given.
        if outFH is not sys.stdout:
            outFH.close()
Make queries against NCBI Taxa databases
12,837
def derivative(self, x):
    """Evaluate the derivative of the interpolated function at *x*.

    The result has the same shape as the (array-converted) input.
    """
    arr = np.asarray(x)
    return self._der(arr.flatten()).reshape(arr.shape)
Evaluates the derivative of the interpolated function at the given input .
12,838
def eval_with_derivative(self, x):
    """Evaluate the interpolated function and its derivative at *x*.

    Returns a (values, derivatives) pair, both shaped like the input.
    """
    arr = np.asarray(x)
    values, slopes = self._evalAndDer(arr.flatten())
    return values.reshape(arr.shape), slopes.reshape(arr.shape)
Evaluates the interpolated function and its derivative at the given input .
12,839
def derivative(self, *args):
    """Derivative with respect to the first dimension: ones when this function
    projects that dimension, zeros otherwise.

    NOTE(review): args[0] is star-unpacked, so callers appear to pass a
    sequence of arrays as the first positional argument — confirm at call sites.
    """
    template = args[0]
    if self.i_dim == 0:
        return np.ones_like(*template)
    return np.zeros_like(*template)
Returns the derivative of the function with respect to the first dimension .
12,840
def derivativeX(self, *args):
    """Derivative with respect to the X dimension.

    X is the first input when n_dims < 4 and the second input otherwise.
    """
    x_index = 1 if self.n_dims >= 4 else 0
    if self.i_dim == x_index:
        return np.ones_like(*args[0])
    return np.zeros_like(*args[0])
Returns the derivative of the function with respect to the X dimension . This is the first input whenever n_dims < 4 and the second input otherwise .
12,841
def derivativeW(self, *args):
    """Derivative with respect to the W dimension (input 0); only defined
    when n_dims >= 4."""
    assert self.n_dims >= 4, "Derivative with respect to W can't be called when n_dims < 4!"
    if self.i_dim == 0:
        return np.ones_like(*args[0])
    return np.zeros_like(*args[0])
Returns the derivative of the function with respect to the W dimension . This should only exist when n_dims > = 4 .
12,842
def _derY(self, x, y):
    """Derivative with respect to y of the bilinear interpolant at (x, y).

    Only called internally by HARKinterpolator2D.derivativeY.
    """
    if _isscalar(x):
        # Clamp the bracketing indices into [1, n-1] so the (pos-1, pos)
        # pairs below are always valid.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)
    else:
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[x_pos < 1] = 1
        x_pos[x_pos > self.x_n - 1] = self.x_n - 1
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[y_pos < 1] = 1
        y_pos[y_pos > self.y_n - 1] = self.y_n - 1
    # Relative position of x within its bracket.
    alpha = (x - self.x_list[x_pos - 1]) / (self.x_list[x_pos] - self.x_list[x_pos - 1])
    # x-interpolated f at the upper y gridpoint minus the same at the lower
    # y gridpoint, divided by the y step: a finite-difference dF/dy.
    dfdy = (((1 - alpha) * self.f_values[x_pos - 1, y_pos]
             + alpha * self.f_values[x_pos, y_pos]) -
            ((1 - alpha) * self.f_values[x_pos - 1, y_pos - 1]
             + alpha * self.f_values[x_pos, y_pos - 1])) / (self.y_list[y_pos] - self.y_list[y_pos - 1])
    return dfdy
Returns the derivative with respect to y of the interpolated function at each value in x y . Only called internally by HARKinterpolator2D . derivativeY .
12,843
def _derX(self, x, y, z):
    """Derivative with respect to x of the trilinear interpolant at (x, y, z).

    Only called internally by HARKinterpolator3D.derivativeX.
    """
    if _isscalar(x):
        # Clamp bracketing indices into [1, n-1] on each axis.
        x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)
        z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)
    else:
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[x_pos < 1] = 1
        x_pos[x_pos > self.x_n - 1] = self.x_n - 1
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[y_pos < 1] = 1
        y_pos[y_pos > self.y_n - 1] = self.y_n - 1
        z_pos = self.zSearchFunc(self.z_list, z)
        z_pos[z_pos < 1] = 1
        z_pos[z_pos > self.z_n - 1] = self.z_n - 1
    # Relative positions within the y and z brackets.
    beta = (y - self.y_list[y_pos - 1]) / (self.y_list[y_pos] - self.y_list[y_pos - 1])
    gamma = (z - self.z_list[z_pos - 1]) / (self.z_list[z_pos] - self.z_list[z_pos - 1])
    # Bilinear (in y, z) interpolation of f at the upper and lower x
    # gridpoints, differenced and divided by the x step: dF/dx.
    dfdx = (((1 - beta) * (1 - gamma) * self.f_values[x_pos, y_pos - 1, z_pos - 1]
             + (1 - beta) * gamma * self.f_values[x_pos, y_pos - 1, z_pos]
             + beta * (1 - gamma) * self.f_values[x_pos, y_pos, z_pos - 1]
             + beta * gamma * self.f_values[x_pos, y_pos, z_pos]) -
            ((1 - beta) * (1 - gamma) * self.f_values[x_pos - 1, y_pos - 1, z_pos - 1]
             + (1 - beta) * gamma * self.f_values[x_pos - 1, y_pos - 1, z_pos]
             + beta * (1 - gamma) * self.f_values[x_pos - 1, y_pos, z_pos - 1]
             + beta * gamma * self.f_values[x_pos - 1, y_pos, z_pos])) / (self.x_list[x_pos] - self.x_list[x_pos - 1])
    return dfdx
Returns the derivative with respect to x of the interpolated function at each value in x y z . Only called internally by HARKinterpolator3D . derivativeX .
12,844
def _evalAndDer ( self , x ) : m = len ( x ) fx = np . zeros ( ( m , self . funcCount ) ) for j in range ( self . funcCount ) : fx [ : , j ] = self . functions [ j ] ( x ) fx [ np . isnan ( fx ) ] = np . inf i = np . argmin ( fx , axis = 1 ) y = fx [ np . arange ( m ) , i ] dydx = np . zeros_like ( y ) for j in range ( self . funcCount ) : c = i == j dydx [ c ] = self . functions [ j ] . derivative ( x [ c ] ) return y , dydx
Returns the level and first derivative of the function at each value in x . Only called internally by HARKinterpolator1D . eval_and_der .
12,845
def derivativeX(self, x, y):
    """First derivative with respect to x at the given state space points.

    The x-shift depends only on y, so this is simply the wrapped function's
    x-derivative evaluated at the shifted point.
    """
    shift = self.lowerBound(y)
    return self.func.derivativeX(x - shift, y)
Evaluate the first derivative with respect to x of the function at given state space points .
12,846
def derivativeY(self, x, y):
    """First derivative with respect to y at the given state space points.

    Uses the chain rule: the y-dependent x-shift contributes
    -shift'(y) * dF/dx on top of the direct dF/dy term.
    """
    shift, shift_der = self.lowerBound.eval_with_derivative(y)
    return (self.func.derivativeY(x - shift, y)
            - shift_der * self.func.derivativeX(x - shift, y))
Evaluate the first derivative with respect to y of the function at given state space points .
12,847
def derivativeZ(self, x, y, z):
    """First derivative with respect to z at the given state space points.

    The x-shift does not depend on z, so only the direct term remains.
    """
    shift = self.lowerBound(y)
    return self.func.derivativeZ(x - shift, y, z)
Evaluate the first derivative with respect to z of the function at given state space points .
12,848
def _derY(self, x, y, z):
    """Derivative with respect to y at (x, y, z), linearly interpolating in z
    between the 2D xy-interpolators' y-derivatives.

    Only called internally by HARKinterpolator3D.derivativeY.
    """
    if _isscalar(x):
        # Clamp the z bracket index into [1, z_n-1].
        z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)
        alpha = (z - self.z_list[z_pos - 1]) / (self.z_list[z_pos] - self.z_list[z_pos - 1])
        dfdy = (1 - alpha) * self.xyInterpolators[z_pos - 1].derivativeY(x, y) + alpha * self.xyInterpolators[z_pos].derivativeY(x, y)
    else:
        m = len(x)
        z_pos = np.searchsorted(self.z_list, z)
        z_pos[z_pos > self.z_n - 1] = self.z_n - 1
        z_pos[z_pos < 1] = 1
        # Start with NaN so untouched entries are visibly invalid.
        dfdy = np.zeros(m) + np.nan
        if x.size > 0:
            # Process each z bracket in turn, blending the y-derivatives of
            # the two adjacent xy-interpolators.
            for i in range(1, self.z_n):
                c = z_pos == i
                if np.any(c):
                    alpha = (z[c] - self.z_list[i - 1]) / (self.z_list[i] - self.z_list[i - 1])
                    dfdy[c] = (1 - alpha) * self.xyInterpolators[i - 1].derivativeY(x[c], y[c]) + alpha * self.xyInterpolators[i].derivativeY(x[c], y[c])
    return dfdy
Returns the derivative with respect to y of the interpolated function at each value in x y z . Only called internally by HARKinterpolator3D . derivativeY .
12,849
def _derY(self, x, y):
    """Derivative with respect to y of the curvilinear interpolant at (x, y).

    Only called internally by HARKinterpolator2D.derivativeY.
    """
    x_pos, y_pos = self.findSector(x, y)
    alpha, beta = self.findCoords(x, y, x_pos, y_pos)
    # Corner values of the containing sector: A=lower-left, B=lower-right,
    # C=upper-left, D=upper-right in (alpha, beta) coordinates.
    xA = self.x_values[x_pos, y_pos]
    xB = self.x_values[x_pos + 1, y_pos]
    xC = self.x_values[x_pos, y_pos + 1]
    xD = self.x_values[x_pos + 1, y_pos + 1]
    yA = self.y_values[x_pos, y_pos]
    yB = self.y_values[x_pos + 1, y_pos]
    yC = self.y_values[x_pos, y_pos + 1]
    yD = self.y_values[x_pos + 1, y_pos + 1]
    fA = self.f_values[x_pos, y_pos]
    fB = self.f_values[x_pos + 1, y_pos]
    fC = self.f_values[x_pos, y_pos + 1]
    fD = self.f_values[x_pos + 1, y_pos + 1]
    # Jacobian of (x, y) with respect to the sector coordinates (alpha, beta).
    alpha_x = (1 - beta) * (xB - xA) + beta * (xD - xC)
    alpha_y = (1 - beta) * (yB - yA) + beta * (yD - yC)
    beta_x = (1 - alpha) * (xC - xA) + alpha * (xD - xB)
    beta_y = (1 - alpha) * (yC - yA) + alpha * (yD - yB)
    det = alpha_x * beta_y - beta_x * alpha_y
    # Inverse-Jacobian entries giving d(alpha)/dy and d(beta)/dy.
    y_alpha = -beta_x / det
    y_beta = alpha_x / det
    dfda = (1 - beta) * (fB - fA) + beta * (fD - fC)
    dfdb = (1 - alpha) * (fC - fA) + alpha * (fD - fB)
    # Chain rule: dF/dy = dF/dalpha * dalpha/dy + dF/dbeta * dbeta/dy.
    dfdy = y_alpha * dfda + y_beta * dfdb
    return dfdy
Returns the derivative with respect to y of the interpolated function at each value in x y . Only called internally by HARKinterpolator2D . derivativeY .
12,850
def distanceMetric(thing_A, thing_B):
    """A universal distance metric usable as a default in many settings.

    Handles lists (recursively), numbers, array-likes with a shape, and
    objects exposing a distance() method; incomparable inputs yield 1000.0.
    """
    typeA = type(thing_A)
    typeB = type(thing_B)
    if typeA is list and typeB is list:
        lenA = len(thing_A)
        lenB = len(thing_B)
        if lenA == lenB:
            # Elementwise distances; the overall distance is the worst one.
            # default=0.0 fixes a crash on two empty lists (max of nothing).
            distance = max((distanceMetric(a, b) for a, b in zip(thing_A, thing_B)),
                           default=0.0)
        else:
            distance = float(abs(lenA - lenB))
    # BUG FIX: the first clause previously tested typeB instead of typeA
    # ("typeA is int or typeB is float"), so e.g. float-vs-int comparisons
    # fell through to the 1000.0 catch-all.
    elif (typeA is int or typeA is float) and (typeB is int or typeB is float):
        distance = float(abs(thing_A - thing_B))
    elif hasattr(thing_A, 'shape') and hasattr(thing_B, 'shape'):
        if thing_A.shape == thing_B.shape:
            distance = np.max(abs(thing_A - thing_B))
        else:
            # Shapes differ: compare the shapes themselves.
            distance = np.max(abs(thing_A.shape - thing_B.shape))
    elif thing_A.__class__.__name__ == thing_B.__class__.__name__:
        if thing_A.__class__.__name__ == 'function':
            distance = 0.0
        else:
            distance = thing_A.distance(thing_B)
    else:
        distance = 1000.0
    return distance
A universal distance metric that can be used as a default in many settings .
12,851
def distance(self, other):
    """Generic distance between self and other over self.distance_criteria.

    Attributes that are missing on either object, or that cannot be
    compared, contribute a penalty distance of 1000.0 instead of raising.
    """
    distance_list = [0.0]
    for attr_name in self.distance_criteria:
        try:
            obj_A = getattr(self, attr_name)
            obj_B = getattr(other, attr_name)
            distance_list.append(distanceMetric(obj_A, obj_B))
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit as a distance of 1000.
            distance_list.append(1000.0)
    return max(distance_list)
A generic distance method which requires the existence of an attribute called distance_criteria giving a list of strings naming the attributes to be considered by the distance metric .
12,852
def assignParameters(self, **kwds):
    """Assign an arbitrary number of keyword arguments as attributes on self."""
    for key, value in kwds.items():
        setattr(self, key, value)
Assign an arbitrary number of attributes to this agent .
12,853
def getAvg(self, varname, **kwds):
    """Return the mean of attribute *varname*; NaN when the attribute is absent.

    Extra keyword arguments are forwarded to numpy.mean.
    """
    if not hasattr(self, varname):
        return np.nan
    return np.mean(getattr(self, varname), **kwds)
Calculates the average of an attribute of this instance . Returns NaN if no such attribute .
12,854
def timeFlip(self):
    """Reverse the flow of time for this instance.

    Reverses, in place, every attribute named in time_vary and toggles
    the time_flow flag.
    """
    for name in self.time_vary:
        # Call list.reverse() directly instead of building an exec() string;
        # same effect, no dynamic code evaluation.
        getattr(self, name).reverse()
    self.time_flow = not self.time_flow
Reverse the flow of time for this instance .
12,855
def addToTimeVary(self, *params):
    """Add any number of parameter names to time_vary, skipping duplicates."""
    for param in params:
        if param not in self.time_vary:
            self.time_vary.append(param)
Adds any number of parameters to time_vary for this instance .
12,856
def addToTimeInv(self, *params):
    """Add any number of parameter names to time_inv, skipping duplicates."""
    for param in params:
        if param not in self.time_inv:
            self.time_inv.append(param)
Adds any number of parameters to time_inv for this instance .
12,857
def delFromTimeVary(self, *params):
    """Remove any number of parameter names from time_vary; absent names are ignored."""
    for param in params:
        if param in self.time_vary:
            self.time_vary.remove(param)
Removes any number of parameters from time_vary for this instance .
12,858
def delFromTimeInv(self, *params):
    """Remove any number of parameter names from time_inv; absent names are ignored."""
    for param in params:
        if param in self.time_inv:
            self.time_inv.remove(param)
Removes any number of parameters from time_inv for this instance .
12,859
def solve(self, verbose=False):
    """Solve the model for this agent type by backward induction.

    Loops over the sequence of one-period problems, passing the solution
    for period t+1 into the problem for period t. Stores the result in
    self.solution (ordered to match time_flow) and registers it in time_vary.
    """
    # Suppress floating point warnings: divide-by-zero etc. are expected
    # in corner cases of the one-period problems.
    with np.errstate(divide='ignore', over='ignore', under='ignore', invalid='ignore'):
        self.preSolve()
        self.solution = solveAgent(self, verbose)
        if self.time_flow:
            self.solution.reverse()
        self.addToTimeVary('solution')
        self.postSolve()
Solve the model for this instance of an agent type by backward induction . Loops through the sequence of one period problems passing the solution from period t + 1 to the problem for period t .
12,860
def checkElementsOfTimeVaryAreLists(self):
    """Assert that every attribute named in time_vary is a list."""
    for param in self.time_vary:
        assert type(getattr(self, param)) == list, \
            param + ' is not a list, but should be' + ' because it is in time_vary'
A method to check that elements of time_vary are lists .
12,861
def simDeath(self):
    """Stub mortality method: marks every agent as dying.

    Returns a Boolean array of size self.AgentCount, all True. Subclasses
    must override this to implement real replacement events.
    """
    print('AgentType subclass must define method simDeath!')
    return np.ones(self.AgentCount, dtype=bool)
Determines which agents in the current population die or should be replaced . Takes no inputs returns a Boolean array of size self . AgentCount which has True for agents who die and False for those that survive . Returns all False by default must be overwritten by a subclass to have replacement events .
12,862
# Solve every AgentType in this market.  Tries the parallel path via
# multiThreadCommands; on any failure prints a one-time warning (gated by
# self.print_parallel_error_once) and falls back to the serial
# multiThreadCommandsFake, which runs the same 'solve()' command per agent.
def solveAgents ( self ) : try : multiThreadCommands ( self . agents , [ 'solve()' ] ) except Exception as err : if self . print_parallel_error_once : self . print_parallel_error_once = False print ( "**** WARNING: could not execute multiThreadCommands in HARK.core.Market.solveAgents(), so using the serial version instead. This will likely be slower. The multiTreadCommands() functions failed with the following error:" , '\n ' , sys . exc_info ( ) [ 0 ] , ':' , err ) multiThreadCommandsFake ( self . agents , [ 'solve()' ] )
Solves the microeconomic problem for all AgentTypes in this market .
12,863
# Iterate to a market equilibrium: repeatedly solve the agents' problems,
# simulate a history, and recompute the aggregate dynamic rule, until the
# distance between successive rules falls below self.tolerance or max_loops
# iterations have run.  The first pass uses a sentinel distance (1000000.0)
# since there is no previous rule to compare against.  Stores the final rule
# in self.dynamics.
def solve ( self ) : go = True max_loops = self . max_loops completed_loops = 0 old_dynamics = None while go : self . solveAgents ( ) self . makeHistory ( ) new_dynamics = self . updateDynamics ( ) if completed_loops > 0 : distance = new_dynamics . distance ( old_dynamics ) else : distance = 1000000.0 old_dynamics = new_dynamics completed_loops += 1 go = distance >= self . tolerance and completed_loops < max_loops self . dynamics = new_dynamics
Solves the market by finding a dynamic rule that governs the aggregate market state such that when agents believe in these dynamics their actions collectively generate the same dynamic rule .
12,864
def reap(self):
    """Collect each attribute named in reap_vars from every agent.

    For each name X in self.reap_vars, builds a list of the agents'
    X attributes (in agent order) and stores that list on self as X.

    Returns
    -------
    None
    """
    for name in self.reap_vars:
        gathered = [getattr(agent, name) for agent in self.agents]
        setattr(self, name, gathered)
Collects attributes named in reap_vars from each AgentType in the market storing them in respectively named attributes of self .
12,865
def sow(self):
    """Broadcast each attribute named in sow_vars from self to every agent.

    For each name X in self.sow_vars, copies self.X onto every AgentType
    in self.agents as an attribute of the same name.

    Returns
    -------
    None
    """
    for name in self.sow_vars:
        seed_val = getattr(self, name)
        for agent in self.agents:
            setattr(agent, name, seed_val)
Distributes attributes named in sow_vars from self to each AgentType in the market , storing them in respectively named attributes .
12,866
def mill(self):
    """Process reaped variables through millRule and store its products.

    Builds the keyword arguments for millRule directly from the attributes
    named in reap_vars and const_vars (replacing the original eval of a
    pasted-together dict literal, which was fragile and a needless use of
    eval), then copies each attribute named in sow_vars from the returned
    product onto self.

    Returns
    -------
    None
    """
    # Same names and values as the eval-built dict: reap_vars then const_vars.
    mill_dict = {name: getattr(self, name) for name in self.reap_vars}
    for name in self.const_vars:
        mill_dict[name] = getattr(self, name)
    product = self.millRule(**mill_dict)
    for var in self.sow_vars:
        setattr(self, var, getattr(product, var))
Processes the variables collected from agents using the function millRule , storing the results in the attributes named in sow_vars .
12,867
def store(self):
    """Append the current value of each tracked variable X to X_hist.

    Returns
    -------
    None
    """
    for name in self.track_vars:
        getattr(self, name + '_hist').append(getattr(self, name))
Record the current value of each variable X named in track_vars in an attribute named X_hist .
12,868
def updateDynamics(self):
    """Compute a new aggregate dynamic rule and push it to all agents.

    Gathers the history attribute X_hist for each argument X of
    self.calcDynamics (other than self) and calls calcDynamics on them.
    The original built that keyword dict by eval of a pasted string; a
    dict comprehension is equivalent and avoids eval entirely.  Each
    attribute named in dyn_vars is then copied from the result onto
    every agent in self.agents.

    Returns
    -------
    dynamics : object
        The object returned by self.calcDynamics.
    """
    arg_names = list(getArgNames(self.calcDynamics))
    if 'self' in arg_names:
        arg_names.remove('self')
    update_dict = {name: getattr(self, name + '_hist') for name in arg_names}
    dynamics = self.calcDynamics(**update_dict)
    for var_name in self.dyn_vars:
        this_obj = getattr(dynamics, var_name)
        for this_type in self.agents:
            setattr(this_type, var_name, this_obj)
    return dynamics
Calculates a new aggregate dynamic rule using the history of variables named in track_vars and distributes this rule to AgentTypes in agents .
12,869
def getArgNames(function):
    """Return the names of the declared arguments of `function`.

    Parameters
    ----------
    function : callable
        A plain Python function (must expose a __code__ attribute).

    Returns
    -------
    argNames : tuple of str
        Argument names, in declaration order.
    """
    code = function.__code__
    return code.co_varnames[:code.co_argcount]
Returns a list of strings naming all of the arguments for the passed function .
12,870
def approxMeanOneLognormal(N, sigma=1.0, **kwargs):
    """Discretely approximate a lognormal distribution with mean one.

    Shifts the log-mean to -sigma**2/2 so the level mean is exactly one,
    then delegates to approxLognormal; see that function for details.

    Parameters
    ----------
    N : int
        Number of discrete points.
    sigma : float
        Standard deviation of the log of the variable.

    Returns
    -------
    [pmf, X] : list
        Probabilities and discrete values from approxLognormal.
    """
    pmf, X = approxLognormal(N=N, mu=-0.5 * sigma ** 2, sigma=sigma, **kwargs)
    return [pmf, X]
Calculate a discrete approximation to a mean one lognormal distribution . Based on function approxLognormal ; see that function s documentation for further notes .
12,871
def approxBeta(N, a=1.0, b=1.0):
    """Discretely approximate a Beta(a, b) distribution with N equiprobable points.

    Uses a crude numeric integration: evaluates the inverse CDF on a fine
    grid of N*1000 quantiles and averages 1000 consecutive quantiles per
    point, so it can be quite slow.

    Parameters
    ----------
    N : int
        Number of discrete points.
    a, b : float
        Beta shape parameters.

    Returns
    -------
    [pmf, X] : list of np.ndarray
        Uniform probabilities and discrete values.
    """
    P = 1000  # quantiles averaged per discrete point
    quantiles = stats.beta.ppf(np.linspace(0.0, 1.0, N * P), a, b)
    X = np.mean(np.reshape(quantiles, (N, P)), axis=1)
    pmf = np.full(N, 1.0 / N)
    return [pmf, X]
Calculate a discrete approximation to the beta distribution . May be quite slow as it uses a rudimentary numeric integration method to generate the discrete approximation .
12,872
def approxUniform(N, bot=0.0, top=1.0):
    """Discretely approximate a Uniform(bot, top) distribution.

    Returns N equiprobable points placed symmetrically about the midpoint
    of the interval.

    Parameters
    ----------
    N : int
        Number of discrete points.
    bot, top : float
        Lower and upper limits of the distribution.

    Returns
    -------
    [pmf, X] : list of np.ndarray
        Uniform probabilities and discrete values.
    """
    pmf = np.full(N, 1.0 / N)
    mid = 0.5 * (top + bot)
    half = 0.5 * (top - bot)
    offsets = np.linspace(-(N - 1.0) / 2.0, (N - 1.0) / 2.0, N) / (N / 2.0)
    return [pmf, mid + half * offsets]
Makes a discrete approximation to a uniform distribution given its bottom and top limits and number of points .
12,873
def addDiscreteOutcomeConstantMean(distribution, x, p, sort=False):
    """Insert outcome x with probability p while holding the mean constant.

    Existing outcomes are rescaled by (1 - p*x)/(1 - p) and their
    probabilities by (1 - p); for a distribution with mean one this keeps
    the overall mean unchanged (presumably the intended use — the scaling
    only preserves a general mean when it equals one).

    Parameters
    ----------
    distribution : [np.ndarray, np.ndarray]
        Existing [probabilities, values].
    x : float
        New outcome value.
    p : float
        Probability of the new outcome.
    sort : bool
        Whether to sort the combined outcomes ascending.

    Returns
    -------
    [pmf, X] : list of np.ndarray
    """
    old_pmf, old_vals = distribution[0], distribution[1]
    X = np.append(x, old_vals * (1 - p * x) / (1 - p))
    pmf = np.append(p, old_pmf * (1 - p))
    if sort:
        order = np.argsort(X)
        X = X[order]
        pmf = pmf[order]
    return [pmf, X]
Adds a discrete outcome of x with probability p to an existing distribution holding constant the relative probabilities of other outcomes and overall mean .
12,874
def makeGridExpMult(ming, maxg, ng, timestonest=20):
    """Construct a multi-exponentially spaced grid on [ming, maxg].

    When timestonest > 0, applies log(x + 1) to the endpoints timestonest
    times, linearly spaces ng points in that transformed domain, then
    inverts with exp(x) - 1 the same number of times.  When
    timestonest <= 0, falls back to a plain exponentially spaced grid
    (even spacing in log(x); requires ming > 0).

    Parameters
    ----------
    ming, maxg : float
        Grid endpoints.
    ng : int
        Number of points.
    timestonest : int
        Number of nestings of the log(x + 1) transform.

    Returns
    -------
    grid : np.ndarray
    """
    if timestonest > 0:
        lo, hi = ming, maxg
        for _ in range(timestonest):
            lo = np.log(lo + 1)
            hi = np.log(hi + 1)
        grid = np.linspace(lo, hi, ng)
        for _ in range(timestonest):
            grid = np.exp(grid) - 1
    else:
        lo = np.log(ming)
        hi = np.log(maxg)
        step = (hi - lo) / (ng - 1)
        # Tiny epsilon keeps the upper endpoint inside arange's half-open range.
        grid = np.exp(np.arange(lo, hi + 0.000001, step))
    return grid
Make a multi - exponentially spaced grid .
12,875
def calcWeightedAvg(data, weights):
    """Weighted average of simulated data.

    Averages each row of `data` across its columns (axis=1), then takes
    the dot product of those row means with `weights`.

    Parameters
    ----------
    data : np.ndarray
        2D array; each row is one variable's simulated values.
    weights : np.ndarray
        One weight per row of `data`.

    Returns
    -------
    weighted_sum : float
    """
    return np.dot(np.mean(data, axis=1), weights)
Generates a weighted average of simulated data . The Nth row of data is averaged and then weighted by the Nth element of weights in an aggregate average .
12,876
def kernelRegression(x, y, bot=None, top=None, N=500, h=None):
    """Nadaraya-Watson 1D kernel regression with an Epanechnikov kernel.

    Evaluates the kernel-weighted mean of y on an even grid of N points
    over [bot, top], then returns a linear interpolant over those fitted
    values (NaN where the kernel weights vanish).

    Parameters
    ----------
    x, y : np.ndarray
        Data coordinates.
    bot, top : float, optional
        Evaluation range; defaults to the range of x.
    N : int
        Number of grid points.
    h : float, optional
        Kernel bandwidth; defaults to 2*(top - bot)/N.

    Returns
    -------
    regression : scipy.interpolate.interp1d
        Interpolant over the fitted grid.
    """
    if bot is None:
        bot = np.min(x)
    if top is None:
        top = np.max(x)
    if h is None:
        h = 2.0 * (top - bot) / float(N)
    x_vec = np.linspace(bot, top, num=N)
    y_vec = np.zeros_like(x_vec) + np.nan
    for j, x_here in enumerate(x_vec):
        weights = epanechnikovKernel(x, x_here, h)
        y_vec[j] = np.dot(weights, y) / np.sum(weights)
    return interp1d(x_vec, y_vec, bounds_error=False, assume_sorted=True)
Performs a non - parametric Nadaraya - Watson 1D kernel regression on given data with optionally specified range number of points and kernel bandwidth .
12,877
def epanechnikovKernel(x, ref_x, h=1.0):
    """Evaluate the Epanechnikov kernel centered at ref_x with bandwidth h.

    K(u) = 0.75*(1 - u**2) for |u| <= 1 where u = (x - ref_x)/h, else 0.

    Parameters
    ----------
    x : np.ndarray
        Evaluation points.
    ref_x : float
        Kernel center.
    h : float
        Bandwidth.

    Returns
    -------
    out : np.ndarray
        Kernel values, zero outside the support.
    """
    u = (x - ref_x) / h
    inside = np.abs(u) <= 1.0
    out = np.zeros_like(x)
    out[inside] = 0.75 * (1.0 - u[inside] ** 2.0)
    return out
The Epanechnikov kernel .
12,878
# Run the StickyC time-series regressions in Stata (needed for the KP
# statistic) and build one LaTeX results panel from Stata's CSV output.
# Relies on module-level results_dir and makeResultsPanel.
# NOTE(review): shell='true' is a truthy string, not the boolean True —
# behavior is the same as shell=True but the spelling looks accidental.
def runStickyEregressionsInStata ( infile_name , interval_size , meas_err , sticky , all_specs , stata_exe ) : dofile = "StickyETimeSeries.do" infile_name_full = os . path . abspath ( results_dir + infile_name + ".txt" ) temp_name_full = os . path . abspath ( results_dir + "temp.txt" ) if meas_err : meas_err_stata = 1 else : meas_err_stata = 0 cmd = [ stata_exe , "do" , dofile , infile_name_full , temp_name_full , str ( interval_size ) , str ( meas_err_stata ) ] stata_status = subprocess . call ( cmd , shell = 'true' ) if stata_status != 0 : raise ValueError ( 'Stata code could not run. Check the stata_exe in StickyEparams.py' ) stata_output = pd . read_csv ( temp_name_full , sep = ',' , header = 0 ) panel_text = makeResultsPanel ( Coeffs = stata_output . CoeffsArray , StdErrs = stata_output . StdErrArray , Rsq = stata_output . RsqArray , Pvals = stata_output . PvalArray , OID = stata_output . OIDarray , Counts = stata_output . ExtraInfo , meas_err = meas_err , sticky = sticky , all_specs = all_specs ) return panel_text
Runs regressions for the main tables of the StickyC paper in Stata and produces a LaTeX table with results for one panel . Running in Stata allows production of the KP - statistic for which there is currently no command in statsmodels . api .
12,879
# Estimate expected lifetime value of being born in each Markov state from
# simulated consumption histories: for every completed lifetime (between
# consecutive birth events per consumer), discount CRRA utility of
# consumption normalized by permanent income at birth, bucket the value by
# the Markov state at birth, and return the nanmean per state.  Depends on
# module-level CRRAutility.  Histories are assumed already trimmed of
# burn-in periods (per the accompanying description).
def calcValueAtBirth ( cLvlHist , BirthBool , PlvlHist , MrkvHist , DiscFac , CRRA ) : J = np . max ( MrkvHist ) + 1 T = MrkvHist . size I = cLvlHist . shape [ 1 ] u = lambda c : CRRAutility ( c , gam = CRRA ) BirthsByPeriod = np . sum ( BirthBool , axis = 1 ) BirthsByState = np . zeros ( J , dtype = int ) for j in range ( J ) : these = MrkvHist == j BirthsByState [ j ] = np . sum ( BirthsByPeriod [ these ] ) N = np . max ( BirthsByState ) vArray = np . zeros ( ( J , N ) ) + np . nan n = np . zeros ( J , dtype = int ) DiscVec = DiscFac ** np . arange ( T ) for i in range ( I ) : birth_t = np . where ( BirthBool [ : , i ] ) [ 0 ] for k in range ( birth_t . size - 1 ) : t0 = birth_t [ k ] t1 = birth_t [ k + 1 ] span = t1 - t0 j = MrkvHist [ t0 ] cVec = cLvlHist [ t0 : t1 , i ] / PlvlHist [ t0 ] uVec = u ( cVec ) v = np . dot ( DiscVec [ : span ] , uVec ) vArray [ j , n [ j ] ] = v n [ j ] += 1 vAtBirth = np . nanmean ( vArray , axis = 1 ) return vAtBirth
Calculate expected value of being born in each Markov state using the realizations of consumption for a history of many consumers . The histories should already be trimmed of the burn in periods .
12,880
# Assemble a LaTeX regression-results table from pre-rendered panels and
# write it to tables_dir/<filename>.tex.  When `caption` is given the table
# is wrapped in a table/minipage environment with a label; otherwise a bare
# center environment is emitted.  `counts` = (quarters per sample, number of
# samples) controls the wording of the notes block.  Left byte-identical:
# the body is dense LaTeX string assembly where any token change alters the
# emitted document.
def makeResultsTable ( caption , panels , counts , filename , label ) : if caption is not None : note_size = '\\footnotesize' else : note_size = '\\tiny' note = '\\multicolumn{6}{p{0.95\\textwidth}}{' + note_size + ' \\textbf{Notes:} ' if counts [ 1 ] > 1 : note += 'Reported statistics are the average values for ' + str ( counts [ 1 ] ) + ' samples of ' + str ( counts [ 0 ] ) + ' simulated quarters each. ' note += 'Bullets indicate that the average sample coefficient divided by average sample standard error is outside of the inner 90\%, 95\%, and 99\% of the standard normal distribution. ' else : note += 'Reported statistics are for a single simulation of ' + str ( counts [ 0 ] ) + ' quarters. ' note += 'Stars indicate statistical significance at the 90\%, 95\%, and 99\% levels, respectively. ' note += 'Instruments $\\textbf{Z}_t = \\{\Delta \log \mathbf{C}_{t-2}, \Delta \log \mathbf{C}_{t-3}, \Delta \log \mathbf{Y}_{t-2}, \Delta \log \mathbf{Y}_{t-3}, A_{t-2}, A_{t-3}, \Delta_8 \log \mathbf{C}_{t-2}, \Delta_8 \log \mathbf{Y}_{t-2} \\}$.' 
note += '}' if caption is not None : output = '\\begin{minipage}{\\textwidth}\n' output += '\\begin{table} \caption{' + caption + '} \\label{' + label + '} \n' output += ' \\centerline{$ \Delta \log \mathbf{C}_{t+1} = \\varsigma + \chi \Delta \log \mathbf{C}_t + \eta \mathbb{E}_t[\Delta \log \mathbf{Y}_{t+1}] + \\alpha A_t + \epsilon_{t+1} $}\n' else : output = '\\begin{center} \n' output += '$ \Delta \log \mathbf{C}_{t+1} = \\varsigma + \chi \Delta \log \mathbf{C}_t + \eta \mathbb{E}_t[\Delta \log \mathbf{Y}_{t+1}] + \\alpha A_t + \epsilon_{t+1} $ \\\\ \n' output += '\\begin{tabular}{d{4}d{4}d{5}cd{4}c}\n \\toprule \n' output += '\multicolumn{3}{c}{Expectations : Dep Var} & OLS & \multicolumn{1}{c}{2${}^{\\text{nd}}$ Stage} & \multicolumn{1}{c}{KP $p$-val} \n' output += '\\\\ \multicolumn{3}{c}{Independent Variables} & or IV & \multicolumn{1}{c}{$\\bar{R}^{2} $} & \multicolumn{1}{c}{Hansen J $p$-val} \n' for panel in panels : output += panel output += '\\\\ \\bottomrule \n ' + note + '\n' output += '\end{tabular}\n' if caption is not None : output += '\end{table}\n' output += '\end{minipage}\n' else : output += '\end{center}\n' with open ( tables_dir + filename + '.tex' , 'w' ) as f : f . write ( output ) f . close ( )
Make a time series regression results table by piecing together one or more panels . Saves a tex file to disk in the tables directory .
12,881
# One-period solver for the consumption-saving model with multiplicative
# preference shocks to marginal utility: thin wrapper that instantiates
# ConsPrefShockSolver, runs its prepare/solve steps, and returns the
# resulting one-period solution.
def solveConsPrefShock ( solution_next , IncomeDstn , PrefShkDstn , LivPrb , DiscFac , CRRA , Rfree , PermGroFac , BoroCnstArt , aXtraGrid , vFuncBool , CubicBool ) : solver = ConsPrefShockSolver ( solution_next , IncomeDstn , PrefShkDstn , LivPrb , DiscFac , CRRA , Rfree , PermGroFac , BoroCnstArt , aXtraGrid , vFuncBool , CubicBool ) solver . prepareToSolve ( ) solution = solver . solve ( ) return solution
Solves a single period of a consumption - saving model with preference shocks to marginal utility . Problem is solved using the method of endogenous gridpoints .
12,882
# One-period solver for the preference-shock model with a kinked interest
# rate (Rboro on borrowing vs Rsave on saving): thin wrapper around
# ConsKinkyPrefSolver, mirroring solveConsPrefShock.
def solveConsKinkyPref ( solution_next , IncomeDstn , PrefShkDstn , LivPrb , DiscFac , CRRA , Rboro , Rsave , PermGroFac , BoroCnstArt , aXtraGrid , vFuncBool , CubicBool ) : solver = ConsKinkyPrefSolver ( solution_next , IncomeDstn , PrefShkDstn , LivPrb , DiscFac , CRRA , Rboro , Rsave , PermGroFac , BoroCnstArt , aXtraGrid , vFuncBool , CubicBool ) solver . prepareToSolve ( ) solution = solver . solve ( ) return solution
Solves a single period of a consumption - saving model with preference shocks to marginal utility and a different interest rate on saving vs borrowing . Problem is solved using the method of endogenous gridpoints .
12,883
# Draw this period's shocks: income shocks via the parent class, then a
# preference shock per agent.  For each model period t, agents currently at
# that point in the cycle receive a random permutation of the discrete
# mean-one lognormal nodes with std PrefShkStd[t] (so shocks match the
# discrete approximation used in solution exactly).
def getShocks ( self ) : IndShockConsumerType . getShocks ( self ) PrefShkNow = np . zeros ( self . AgentCount ) for t in range ( self . T_cycle ) : these = t == self . t_cycle N = np . sum ( these ) if N > 0 : PrefShkNow [ these ] = self . RNG . permutation ( approxMeanOneLognormal ( N , sigma = self . PrefShkStd [ t ] ) [ 1 ] ) self . PrefShkNow = PrefShkNow
Gets permanent and transitory income shocks for this period as well as preference shocks .
12,884
# Endogenous gridpoints with discrete preference shocks: invert marginal
# utility to get baseline consumption, scale by each preference shock's
# CRRA-root to get one consumption row per shock value, recover market
# resources m = c + a, then prepend the natural borrowing constraint point
# (m = BoroCnstNat, c = 0) to every row for interpolation.
def getPointsForInterpolation ( self , EndOfPrdvP , aNrmNow ) : c_base = self . uPinv ( EndOfPrdvP ) PrefShkCount = self . PrefShkVals . size PrefShk_temp = np . tile ( np . reshape ( self . PrefShkVals ** ( 1.0 / self . CRRA ) , ( PrefShkCount , 1 ) ) , ( 1 , c_base . size ) ) self . cNrmNow = np . tile ( c_base , ( PrefShkCount , 1 ) ) * PrefShk_temp self . mNrmNow = self . cNrmNow + np . tile ( aNrmNow , ( PrefShkCount , 1 ) ) m_for_interpolation = np . concatenate ( ( self . BoroCnstNat * np . ones ( ( PrefShkCount , 1 ) ) , self . mNrmNow ) , axis = 1 ) c_for_interpolation = np . concatenate ( ( np . zeros ( ( PrefShkCount , 1 ) ) , self . cNrmNow ) , axis = 1 ) return c_for_interpolation , m_for_interpolation
Find endogenous interpolation points for each asset point and each discrete preference shock .
12,885
# Create newborn agents via the parent class, then set their productivity
# misperception factor pLvlErrNow to 1.0 (newborns perceive their
# productivity correctly); lazily allocates the array on first use.
def simBirth ( self , which_agents ) : AggShockConsumerType . simBirth ( self , which_agents ) if hasattr ( self , 'pLvlErrNow' ) : self . pLvlErrNow [ which_agents ] = 1.0 else : self . pLvlErrNow = np . ones ( self . AgentCount )
Makes new consumers for the given indices . Slightly extends base method by also setting pLvlErrNow = 1 . 0 for new agents indicating that they correctly perceive their productivity .
12,886
def getUpdaters(self):
    """Randomly select which agents update their information this period.

    Marks round(UpdatePrb * AgentCount) agents as updaters by permuting
    a fixed boolean mask with the type's RNG; stores the result in
    self.update and its complement in self.dont.

    Returns
    -------
    None
    """
    n_update = int(round(self.UpdatePrb * self.AgentCount))
    mask = np.zeros(self.AgentCount, dtype=bool)
    mask[:n_update] = True
    self.update = self.RNG.permutation(mask)
    self.dont = np.logical_not(self.update)
Determine which agents update this period vs which don t . Fills in the attributes update and dont as boolean arrays of size AgentCount .
12,887
# Advance perceived and true states for sticky-expectations consumers:
# perceived permanent income grows by the idiosyncratic shock, the true
# level additionally carries the misperception factor pLvlErrNow; true
# market resources mLvlTrueNow are computed from last period's assets plus
# labor income, while the *perceived* normalized resources mNrmNow divide
# the true level by perceived permanent income.
def getStates ( self ) : pLvlPrev = self . pLvlNow self . pLvlNow = pLvlPrev * self . PermShkNow self . PlvlAggNow *= self . PermShkAggNow self . pLvlTrue = self . pLvlNow * self . pLvlErrNow RfreeNow = self . getRfree ( ) bLvlNow = RfreeNow * self . aLvlNow yLvlNow = self . pLvlTrue * self . TranShkNow mLvlTrueNow = bLvlNow + yLvlNow mNrmPcvdNow = mLvlTrueNow / self . pLvlNow self . mNrmNow = mNrmPcvdNow self . mLvlTrueNow = mLvlTrueNow self . yLvlNow = yLvlNow
Gets simulated consumers pLvl and mNrm for this period but with the alteration that these represent perceived rather than actual values . Also calculates mLvlTrue the true level of market resources that the individual has on hand .
12,888
# Markov extension of getUpdaters: choose updaters via the parent method,
# then set updaters' perceived Markov state to the true current state;
# lazily initializes MrkvNowPcvd (everyone correct) on first call.
def getUpdaters ( self ) : StickyEconsumerType . getUpdaters ( self ) if hasattr ( self , 'MrkvNowPcvd' ) : self . MrkvNowPcvd [ self . update ] = self . MrkvNow else : self . MrkvNowPcvd = np . ones ( self . AgentCount , dtype = int ) * self . MrkvNow
Determine which agents update this period vs which don t . Fills in the attributes update and dont as boolean arrays of size AgentCount . This version also updates perceptions of the Markov state .
12,889
def getpLvlError(self):
    """Compute each agent's misperception factor for this period's shocks.

    Updaters perceive the aggregate permanent shock exactly (factor 1.0).
    Non-updaters attribute growth to the aggregate permanent growth factor
    of the Markov state they last observed (MrkvNowPcvd), so their error
    is the ratio of the true aggregate shock to that perceived factor.

    Returns
    -------
    pLvlErr : np.ndarray
        Misperception factor for each agent.
    """
    pLvlErr = np.ones(self.AgentCount)
    dont = self.dont
    pLvlErr[dont] = self.PermShkAggNow / self.PermGroFacAgg[self.MrkvNowPcvd[dont]]
    return pLvlErr
Calculates and returns the misperception of this period s shocks . Updaters have no misperception this period while those who don t update don t see the value of the aggregate permanent shock and thus base their belief about aggregate growth on the last Markov state that they actually observed which is stored in MrkvNowPcvd .
12,890
# Create newborns via the parent class; in the very first simulated period
# additionally initialize true permanent income to 1.0 and set asset levels
# consistent with the normalized draw (aLvlNow = aNrmNow * pLvlTrue).
def simBirth ( self , which_agents ) : super ( self . __class__ , self ) . simBirth ( which_agents ) if self . t_sim == 0 : self . pLvlTrue = np . ones ( self . AgentCount ) self . aLvlNow = self . aNrmNow * self . pLvlTrue
Makes new consumers for the given indices . Slightly extends base method by also setting pLvlTrue = 1 . 0 in the very first simulated period .
12,891
# Representative-agent (Markov) birth: only acts when the single agent is
# flagged for replacement; initializes true permanent income, asset level,
# per-state perceived income, and a degenerate perception distribution
# putting all mass on the true current Markov state.
# NOTE(review): `which_agents == np.array([True])` compares elementwise and
# relies on the array having exactly one element for its truth value —
# works for a representative agent (AgentCount presumably 1), but would
# raise for larger arrays; confirm intent before generalizing.
def simBirth ( self , which_agents ) : if which_agents == np . array ( [ True ] ) : RepAgentMarkovConsumerType . simBirth ( self , which_agents ) if self . t_sim == 0 : self . pLvlTrue = np . ones ( self . AgentCount ) self . aLvlNow = self . aNrmNow * self . pLvlTrue StateCount = self . MrkvArray . shape [ 0 ] self . pLvlNow = np . ones ( StateCount ) self . MrkvPcvd = np . zeros ( StateCount ) self . MrkvPcvd [ self . MrkvNow [ 0 ] ] = 1.0
Makes new consumers for the given indices . Slightly extends base method by also setting pLvlTrue = 1 . 0 in the very first simulated period as well as initializing the perception of aggregate productivity for each Markov state . The representative agent begins with the correct perception of the Markov state .
12,892
# Representative-agent consumption: evaluate the state-specific consumption
# function in every Markov state, then form the consumption *level* as the
# perception-weighted average of cNrm * pLvl across states (weights
# MrkvPcvd).
def getControls ( self ) : StateCount = self . MrkvArray . shape [ 0 ] t = self . t_cycle [ 0 ] cNrmNow = np . zeros ( StateCount ) for i in range ( StateCount ) : cNrmNow [ i ] = self . solution [ t ] . cFunc [ i ] ( self . mNrmNow [ i ] ) self . cNrmNow = cNrmNow self . cLvlNow = np . dot ( cNrmNow * self . pLvlNow , self . MrkvPcvd )
Calculates consumption for the representative agent using the consumption functions . Takes the weighted average of cLvl across perceived Markov states .
12,893
def multiThreadCommandsFake(agent_list, command_list, num_jobs=None):
    """Run each command on every agent serially (single-threaded).

    Each command is a string naming a method call on the agent, e.g.
    'solve()', executed as 'agent.<command>'.  Exists so multithreading
    can be disabled while keeping the same call syntax as
    multiThreadCommands.

    Parameters
    ----------
    agent_list : [AgentType]
        Agents to operate on.
    command_list : [str]
        Method-call strings to execute on each agent.
    num_jobs : None
        Ignored; present only for signature compatibility.

    Returns
    -------
    None
    """
    # NOTE(review): exec on command strings is inherited API — commands
    # must come from trusted code, never from external input.
    for one_agent in agent_list:
        for cmd in command_list:
            exec('one_agent.' + cmd)
Executes the list of commands in command_list for each AgentType in agent_list in an ordinary single - threaded loop . Each command should be a method of that AgentType subclass . This function exists so as to easily disable multithreading , as it uses the same syntax as multiThreadCommands .
12,894
# Parallel version: runs the command list on each agent in worker processes
# via joblib's Parallel/delayed (helper runCommands defined elsewhere), then
# copies the returned agents back into agent_list in place.  Falls through
# to the serial fake for a single agent; num_jobs defaults to
# min(len(agent_list), cpu_count()).
def multiThreadCommands ( agent_list , command_list , num_jobs = None ) : if len ( agent_list ) == 1 : multiThreadCommandsFake ( agent_list , command_list ) return None if num_jobs is None : num_jobs = min ( len ( agent_list ) , multiprocessing . cpu_count ( ) ) agent_list_out = Parallel ( n_jobs = num_jobs ) ( delayed ( runCommands ) ( * args ) for args in zip ( agent_list , len ( agent_list ) * [ command_list ] ) ) for j in range ( len ( agent_list ) ) : agent_list [ j ] = agent_list_out [ j ]
Executes the list of commands in command_list for each AgentType in agent_list using a multithreaded system . Each command should be a method of that AgentType subclass .
12,895
def minimizeNelderMead(objectiveFunction, parameter_guess, verbose=False, **kwargs):
    """Minimize objectiveFunction via the Nelder-Mead simplex algorithm.

    Parameters
    ----------
    objectiveFunction : callable
        Function of one array argument returning a scalar to minimize.
    parameter_guess : array_like
        Starting point for the search.
    verbose : bool
        Whether to print scipy's convergence report and the elapsed time.
    **kwargs
        Passed through to scipy.optimize.fmin.

    Returns
    -------
    xopt : np.ndarray
        Parameter values that minimize the objective.
    """
    start = time()
    xopt, fopt, optiter, funcalls, warnflag = fmin(
        objectiveFunction, parameter_guess, full_output=1, maxiter=1000,
        disp=verbose, **kwargs)
    elapsed = time() - start
    if warnflag != 0:
        # Non-zero warnflag means fmin hit its iteration/evaluation cap.
        warnings.warn("Minimization failed! xopt=" + str(xopt) + ', fopt=' +
                      str(fopt) + ', optiter=' + str(optiter) + ', funcalls=' +
                      str(funcalls) + ', warnflag=' + str(warnflag))
    if verbose:
        print("Time to estimate is " + str(elapsed) + " seconds.")
    return xopt
Minimizes the objective function using the Nelder - Mead simplex algorithm starting from an initial parameter guess .
12,896
def minimizePowell(objectiveFunction, parameter_guess, verbose=False):
    """Minimize objectiveFunction via the derivative-free Powell method.

    Parameters
    ----------
    objectiveFunction : callable
        Function of one array argument returning a scalar to minimize.
    parameter_guess : array_like
        Starting point for the search.
    verbose : bool
        Whether to print scipy's convergence report and the elapsed time.

    Returns
    -------
    xopt : np.ndarray
        Parameter values that minimize the objective.
    """
    start = time()
    xopt, fopt, direc, optiter, funcalls, warnflag = fmin_powell(
        objectiveFunction, parameter_guess, full_output=1, maxiter=1000,
        disp=verbose)
    elapsed = time() - start
    if warnflag != 0:
        # Non-zero warnflag means fmin_powell hit its iteration/evaluation cap.
        warnings.warn("Minimization failed! xopt=" + str(xopt) + ', fopt=' +
                      str(fopt) + ', direc=' + str(direc) + ', optiter=' +
                      str(optiter) + ', funcalls=' + str(funcalls) +
                      ', warnflag=' + str(warnflag))
    if verbose:
        print("Time to estimate is " + str(elapsed) + " seconds.")
    return xopt
Minimizes the objective function using a derivative - free Powell algorithm starting from an initial parameter guess .
12,897
# One backshooting step for the TractableBufferStock-style model: given
# consumption, market resources, and the MPC at the "next" point on the
# stable arm, invert the Euler equation (with unemployment risk UnempPrb and
# the perfect-foresight MPC of the unemployed) to recover the previous
# period's cNow, mNow, and MPCnow.  Depends on module-level utilityPP.
def findNextPoint ( DiscFac , Rfree , CRRA , PermGroFacCmp , UnempPrb , Rnrm , Beth , cNext , mNext , MPCnext , PFMPC ) : uPP = lambda x : utilityPP ( x , gam = CRRA ) cNow = PermGroFacCmp * ( DiscFac * Rfree ) ** ( - 1.0 / CRRA ) * cNext * ( 1 + UnempPrb * ( ( cNext / ( PFMPC * ( mNext - 1.0 ) ) ) ** CRRA - 1.0 ) ) ** ( - 1.0 / CRRA ) mNow = ( PermGroFacCmp / Rfree ) * ( mNext - 1.0 ) + cNow cUNext = PFMPC * ( mNow - cNow ) * Rnrm natural = Beth * Rnrm * ( 1.0 / uPP ( cNow ) ) * ( ( 1.0 - UnempPrb ) * uPP ( cNext ) * MPCnext + UnempPrb * uPP ( cUNext ) * PFMPC ) MPCnow = natural / ( natural + 1 ) return mNow , cNow , MPCnow
Calculates what consumption market resources and the marginal propensity to consume must have been in the previous period given model parameters and values of market resources consumption and MPC today .
12,898
# After backshooting completes: prepend the origin (m=0, c=0, MPC=MPCmax)
# to the stable-arm point lists, build the employed consumption function as
# a cubic interpolation through those points (with the perfect-foresight
# limit slope PFMPC), and define the unemployed consumption function as the
# linear rule c = PFMPC * m.
def postSolve ( self ) : self . solution [ 0 ] . mNrm_list . insert ( 0 , 0.0 ) self . solution [ 0 ] . cNrm_list . insert ( 0 , 0.0 ) self . solution [ 0 ] . MPC_list . insert ( 0 , self . MPCmax ) self . solution [ 0 ] . cFunc = CubicInterp ( self . solution [ 0 ] . mNrm_list , self . solution [ 0 ] . cNrm_list , self . solution [ 0 ] . MPC_list , self . PFMPC * ( self . h - 1.0 ) , self . PFMPC ) self . solution [ 0 ] . cFunc_U = lambda m : self . PFMPC * m
This method adds consumption at m = 0 to the list of stable arm points then constructs the consumption function as a cubic interpolation over those points . Should be run after the backshooting routine is complete .
12,899
# Initialize newborn agents: draw asset levels from a lognormal with
# parameters aLvlInitMean/aLvlInitStd (via module-level drawLognormal,
# seeded from this type's RNG), start them employed (eStateNow = 1.0), and
# reset their age and cycle-position counters.
def simBirth ( self , which_agents ) : N = np . sum ( which_agents ) self . aLvlNow [ which_agents ] = drawLognormal ( N , mu = self . aLvlInitMean , sigma = self . aLvlInitStd , seed = self . RNG . randint ( 0 , 2 ** 31 - 1 ) ) self . eStateNow [ which_agents ] = 1.0 self . t_age [ which_agents ] = 0 self . t_cycle [ which_agents ] = 0 return None
Makes new consumers for the given indices . Initialized variables include aNrm as well as time variables t_age and t_cycle . Normalized assets are drawn from a lognormal distributions given by aLvlInitMean and aLvlInitStd .