idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
10,500
def enable_sphinx_if_possible ( ) : if SPHINX_INSTALLED : srcdir = tempfile . mkdtemp ( ) outdir = os . path . join ( srcdir , '_build' ) try : sphinx . application . Sphinx ( srcdir = srcdir , confdir = None , outdir = outdir , doctreedir = outdir , buildername = 'dummy' , status = None ) yield finally : shutil . rmtree ( srcdir ) else : yield
Register Sphinx directives and roles .
10,501
def main ( ) : args = parse_args ( ) if not args . files : return 0 with enable_sphinx_if_possible ( ) : status = 0 pool = multiprocessing . Pool ( multiprocessing . cpu_count ( ) ) try : if len ( args . files ) > 1 : results = pool . map ( _check_file , [ ( name , args ) for name in args . files ] ) else : results = [ _check_file ( ( args . files [ 0 ] , args ) ) ] for ( filename , errors ) in results : for error in errors : line_number = error [ 0 ] message = error [ 1 ] if not re . match ( r'\([A-Z]+/[0-9]+\)' , message ) : message = '(ERROR/3) ' + message output_message ( '{}:{}: {}' . format ( filename , line_number , message ) ) status = 1 except ( IOError , UnicodeError ) as exception : output_message ( exception ) status = 1 return status
Return 0 on success .
10,502
def run ( self ) : try : language = self . arguments [ 0 ] except IndexError : language = '' code = '\n' . join ( self . content ) literal = docutils . nodes . literal_block ( code , code ) literal [ 'classes' ] . append ( 'code-block' ) literal [ 'language' ] = language return [ literal ]
Run directive .
10,503
def visit_literal_block ( self , node ) : language = node . get ( 'language' , None ) is_code_node = False if not language : is_code_node = True classes = node . get ( 'classes' ) if 'code' in classes : language = classes [ - 1 ] else : return if language in self . ignore [ 'languages' ] : return if language == 'doctest' or ( language == 'python' and node . rawsource . lstrip ( ) . startswith ( '>>> ' ) ) : self . visit_doctest_block ( node ) raise docutils . nodes . SkipNode checker = { 'bash' : bash_checker , 'c' : c_checker , 'cpp' : cpp_checker , 'json' : lambda source , _ : lambda : check_json ( source ) , 'xml' : lambda source , _ : lambda : check_xml ( source ) , 'python' : lambda source , _ : lambda : check_python ( source ) , 'rst' : lambda source , _ : lambda : check_rst ( source , ignore = self . ignore ) } . get ( language ) if checker : run = checker ( node . rawsource , self . working_directory ) self . _add_check ( node = node , run = run , language = language , is_code_node = is_code_node ) raise docutils . nodes . SkipNode
Check syntax of code block .
10,504
def visit_paragraph ( self , node ) : find = re . search ( r'\[[^\]]+\]\([^\)]+\)' , node . rawsource ) if find is not None : self . document . reporter . warning ( '(rst) Link is formatted in Markdown style.' , base_node = node )
Check syntax of reStructuredText .
10,505
def _add_check ( self , node , run , language , is_code_node ) : def run_check ( ) : all_results = run ( ) if all_results is not None : if all_results : for result in all_results : error_offset = result [ 0 ] - 1 line_number = getattr ( node , 'line' , None ) if line_number is not None : yield ( beginning_of_code_block ( node = node , line_number = line_number , full_contents = self . contents , is_code_node = is_code_node ) + error_offset , '({}) {}' . format ( language , result [ 1 ] ) ) else : yield ( self . filename , 0 , 'unknown error' ) self . checkers . append ( run_check )
Add checker that will be run .
10,506
def translate ( self ) : visitor = CheckTranslator ( self . document , contents = self . contents , filename = self . filename , ignore = self . ignore ) self . document . walkabout ( visitor ) self . checkers += visitor . checkers
Run CheckTranslator .
10,507
def import_backend ( config ) : backend_name = config [ 'backend' ] path = backend_name . split ( '.' ) backend_mod_name , backend_class_name = '.' . join ( path [ : - 1 ] ) , path [ - 1 ] backend_mod = importlib . import_module ( backend_mod_name ) backend_class = getattr ( backend_mod , backend_class_name ) return backend_class ( config [ 'settings' ] )
Imports and initializes the Backend class .
10,508
def queue_once_key ( name , kwargs , restrict_to = None ) : keys = [ 'qo' , force_string ( name ) ] if restrict_to is not None : restrict_kwargs = { key : kwargs [ key ] for key in restrict_to } keys += kwargs_to_list ( restrict_kwargs ) else : keys += kwargs_to_list ( kwargs ) key = "_" . join ( keys ) return key
Turns a list the name of the task the kwargs and allowed keys into a redis key .
10,509
def raise_or_lock ( self , key , timeout ) : lock_path = self . _get_lock_path ( key ) try : fd = os . open ( lock_path , os . O_CREAT | os . O_EXCL ) except OSError as error : if error . errno == errno . EEXIST : mtime = os . path . getmtime ( lock_path ) ttl = mtime + timeout - time . time ( ) if ttl > 0 : raise AlreadyQueued ( ttl ) else : os . utime ( lock_path , None ) return else : raise else : os . close ( fd )
Check the lock file and create one if it does not exist .
10,510
def clear_lock ( self , key ) : lock_path = self . _get_lock_path ( key ) os . remove ( lock_path )
Remove the lock file .
10,511
def apply_async ( self , args = None , kwargs = None , ** options ) : once_options = options . get ( 'once' , { } ) once_graceful = once_options . get ( 'graceful' , self . once . get ( 'graceful' , False ) ) once_timeout = once_options . get ( 'timeout' , self . once . get ( 'timeout' , self . default_timeout ) ) if not options . get ( 'retries' ) : key = self . get_key ( args , kwargs ) try : self . once_backend . raise_or_lock ( key , timeout = once_timeout ) except AlreadyQueued as e : if once_graceful : return EagerResult ( None , None , states . REJECTED ) raise e return super ( QueueOnce , self ) . apply_async ( args , kwargs , ** options )
Attempts to queues a task . Will raises an AlreadyQueued exception if already queued .
10,512
def cached_dataframe ( self , csv_path , compute_fn ) : if not csv_path . endswith ( ".csv" ) : raise ValueError ( "Invalid path '%s', must be a CSV file" % csv_path ) if csv_path in self . _memory_cache : return self . _memory_cache [ csv_path ] if exists ( csv_path ) and not self . is_empty ( csv_path ) : df = self . _read_csv ( csv_path ) else : df = compute_fn ( ) if not isinstance ( df , pd . DataFrame ) : raise TypeError ( "Expected compute_fn to return DataFrame, got %s : %s" % ( df , type ( df ) ) ) self . _write_csv ( df , csv_path ) self . _memory_cache [ csv_path ] = df return df
If a CSV path is in the _memory_cache then return that cached value .
10,513
def cached_object ( self , path , compute_fn ) : if path in self . _memory_cache : return self . _memory_cache [ path ] if exists ( path ) and not self . is_empty ( path ) : obj = load_pickle ( path ) else : obj = compute_fn ( ) dump_pickle ( obj , path ) self . _memory_cache [ path ] = obj return obj
If cached_object has already been called for a value of path in this running Python instance then it should have a cached value in the _memory_cache ; return that value .
10,514
def _parse_header_id ( line ) : if type ( line ) is not binary_type : raise TypeError ( "Expected header line to be of type %s but got %s" % ( binary_type , type ( line ) ) ) if len ( line ) <= 1 : raise ValueError ( "No identifier on FASTA line" ) space_index = line . find ( b" " ) if space_index >= 0 : identifier = line [ 1 : space_index ] else : identifier = line [ 1 : ] dot_index = identifier . find ( b"." ) if dot_index >= 0 : identifier = identifier [ : dot_index ] return identifier . decode ( "ascii" )
Pull the transcript or protein identifier from the header line which starts with >
10,515
def read_file ( self , fasta_path ) : fasta_dictionary = { } for ( identifier , sequence ) in self . iterate_over_file ( fasta_path ) : fasta_dictionary [ identifier ] = sequence return fasta_dictionary
Read the contents of a FASTA file into a dictionary
10,516
def iterate_over_file ( self , fasta_path ) : with self . _open ( fasta_path ) as f : for line in f : line = line . rstrip ( ) if len ( line ) == 0 : continue first_char = line [ 0 : 1 ] if first_char == b">" : id_and_seq = self . _read_header ( line ) if id_and_seq is not None : yield id_and_seq elif first_char == b";" : continue else : self . current_lines . append ( line ) id_and_seq = self . _current_entry ( ) if id_and_seq is not None : yield id_and_seq
Generator that yields identifiers paired with sequences .
10,517
def _open ( self , fasta_path ) : if fasta_path . endswith ( "gz" ) or fasta_path . endswith ( "gzip" ) : return GzipFile ( fasta_path , 'rb' ) else : return open ( fasta_path , 'rb' )
Open either a text file or compressed gzip file as a stream of bytes .
10,518
def normalize_reference_name ( name ) : lower_name = name . strip ( ) . lower ( ) for reference in Species . _reference_names_to_species . keys ( ) : if reference . lower ( ) == lower_name : return reference raise ValueError ( "Reference genome '%s' not found" % name )
Search the dictionary of species - specific references to find a reference name that matches aside from capitalization .
10,519
def genome_for_reference_name ( reference_name , allow_older_downloaded_release = True ) : reference_name = normalize_reference_name ( reference_name ) species = find_species_by_reference ( reference_name ) ( min_ensembl_release , max_ensembl_release ) = species . reference_assemblies [ reference_name ] if allow_older_downloaded_release : for release in reversed ( range ( min_ensembl_release , max_ensembl_release + 1 ) ) : candidate = EnsemblRelease . cached ( release = release , species = species ) if candidate . required_local_files_exist ( ) : return candidate return EnsemblRelease . cached ( release = max_ensembl_release , species = species )
Given a genome reference name such as GRCh38 returns the corresponding Ensembl Release object .
10,520
def normalize_species_name ( name ) : lower_name = name . lower ( ) . strip ( ) if lower_name in Species . _common_names_to_species : return Species . _common_names_to_species [ lower_name ] . latin_name return lower_name . replace ( " " , "_" )
If species name was Homo sapiens then replace spaces with underscores and return homo_sapiens . Also replace common names like human with homo_sapiens .
10,521
def check_species_object ( species_name_or_object ) : if isinstance ( species_name_or_object , Species ) : return species_name_or_object elif isinstance ( species_name_or_object , str ) : return find_species_by_name ( species_name_or_object ) else : raise ValueError ( "Unexpected type for species: %s : %s" % ( species_name_or_object , type ( species_name_or_object ) ) )
Helper for validating user supplied species names or objects .
10,522
def register ( cls , latin_name , synonyms , reference_assemblies ) : species = Species ( latin_name = latin_name , synonyms = synonyms , reference_assemblies = reference_assemblies ) cls . _latin_names_to_species [ species . latin_name ] = species for synonym in synonyms : if synonym in cls . _common_names_to_species : raise ValueError ( "Can't use synonym '%s' for both %s and %s" % ( synonym , species , cls . _common_names_to_species [ synonym ] ) ) cls . _common_names_to_species [ synonym ] = species for reference_name in reference_assemblies : if reference_name in cls . _reference_names_to_species : raise ValueError ( "Can't use reference '%s' for both %s and %s" % ( reference_name , species , cls . _reference_names_to_species [ reference_name ] ) ) cls . _reference_names_to_species [ reference_name ] = species return species
Create a Species object from the given arguments and enter into all the dicts used to look the species up by its fields .
10,523
def offset ( self , position ) : if position > self . end or position < self . start : raise ValueError ( "Position %d outside valid range %d..%d of %s" % ( position , self . start , self . end , self ) ) elif self . on_forward_strand : return position - self . start else : return self . end - position
Offset of given position from stranded start of this locus .
10,524
def overlaps ( self , contig , start , end , strand = None ) : return ( self . can_overlap ( contig , strand ) and self . distance_to_interval ( start , end ) == 0 )
Does this locus overlap with a given range of positions?
10,525
def _memoize_cache_key ( args , kwargs ) : cache_key_list = [ ] for arg in args : if type ( arg ) is list : cache_key_list . append ( tuple ( arg ) ) else : cache_key_list . append ( arg ) for ( k , v ) in sorted ( kwargs . items ( ) ) : if type ( v ) is list : cache_key_list . append ( ( k , tuple ( v ) ) ) else : cache_key_list . append ( ( k , v ) ) return tuple ( cache_key_list )
Turn args tuple and kwargs dictionary into a hashable key .
10,526
def memoize ( fn ) : cache = { } @ wraps ( fn ) def wrapped_fn ( * args , ** kwargs ) : cache_key = _memoize_cache_key ( args , kwargs ) try : return cache [ cache_key ] except KeyError : value = fn ( * args , ** kwargs ) cache [ cache_key ] = value return value def clear_cache ( ) : cache . clear ( ) wrapped_fn . clear_cache = clear_cache wrapped_fn . cache = cache wrapped_fn . make_cache_key = _memoize_cache_key return wrapped_fn
Simple reset - able memoization decorator for functions and methods assumes that all arguments to the function can be hashed and compared .
10,527
def normalize_init_values ( cls , release , species , server ) : release = check_release_number ( release ) species = check_species_object ( species ) return ( release , species , server )
Normalizes the arguments which uniquely specify an EnsemblRelease genome .
10,528
def cached ( cls , release = MAX_ENSEMBL_RELEASE , species = human , server = ENSEMBL_FTP_SERVER ) : init_args_tuple = cls . normalize_init_values ( release , species , server ) if init_args_tuple in cls . _genome_cache : genome = cls . _genome_cache [ init_args_tuple ] else : genome = cls . _genome_cache [ init_args_tuple ] = cls ( * init_args_tuple ) return genome
Construct EnsemblRelease if it s never been made before otherwise return an old instance .
10,529
def cache_subdirectory ( reference_name = None , annotation_name = None , annotation_version = None ) : if reference_name is None : reference_name = "" if annotation_name is None : annotation_name = "" if annotation_version is None : annotation_version = "" reference_dir = join ( CACHE_BASE_SUBDIR , reference_name ) annotation_dir = "%s%s" % ( annotation_name , annotation_version ) return join ( reference_dir , annotation_dir )
Which cache subdirectory to use for a given annotation database over a particular reference . All arguments can be omitted to just get the base subdirectory for all pyensembl cached datasets .
10,530
def _fields ( self ) : return ( ( 'reference_name' , self . reference_name , ) , ( 'annotation_name' , self . annotation_name ) , ( 'annotation_version' , self . annotation_version ) , ( 'cache_directory_path' , self . cache_directory_path ) , ( 'decompress_on_download' , self . decompress_on_download ) , ( 'copy_local_files_to_cache' , self . copy_local_files_to_cache ) )
Fields used for hashing string representation equality comparison
10,531
def cached_path ( self , path_or_url ) : assert path_or_url , "Expected non-empty string for path_or_url" remote_filename = split ( path_or_url ) [ 1 ] if self . is_url_format ( path_or_url ) : local_filename = datacache . build_local_filename ( download_url = path_or_url , filename = remote_filename , decompress = False ) else : local_filename = remote_filename if self . decompress_on_download : local_filename = self . _remove_compression_suffix_if_present ( local_filename ) if len ( local_filename ) == 0 : raise ValueError ( "Can't determine local filename for %s" % ( path_or_url , ) ) return join ( self . cache_directory_path , local_filename )
When downloading remote files the default behavior is to name local files the same as their remote counterparts .
10,532
def _download_if_necessary ( self , url , download_if_missing , overwrite ) : cached_path = self . cached_path ( url ) missing = not exists ( cached_path ) if ( missing or overwrite ) and download_if_missing : logger . info ( "Fetching %s from URL %s" , cached_path , url ) datacache . download . _download_and_decompress_if_necessary ( full_path = cached_path , download_url = url , timeout = 3600 ) elif missing : raise MissingRemoteFile ( url ) return cached_path
Return local cached path to a remote file download it if necessary .
10,533
def _copy_if_necessary ( self , local_path , overwrite ) : local_path = abspath ( local_path ) if not exists ( local_path ) : raise MissingLocalFile ( local_path ) elif not self . copy_local_files_to_cache : return local_path else : cached_path = self . cached_path ( local_path ) if exists ( cached_path ) and not overwrite : return cached_path copy2 ( local_path , cached_path ) return cached_path
Return cached path to local file copying it to the cache if necessary .
10,534
def download_or_copy_if_necessary ( self , path_or_url , download_if_missing = False , overwrite = False ) : assert path_or_url , "Expected non-empty string for path_or_url" if self . is_url_format ( path_or_url ) : return self . _download_if_necessary ( path_or_url , download_if_missing , overwrite ) else : return self . _copy_if_necessary ( path_or_url , overwrite )
Download a remote file or copy Get the local path to a possibly remote file .
10,535
def delete_cached_files ( self , prefixes = [ ] , suffixes = [ ] ) : for filename in listdir ( self . cache_directory_path ) : delete = ( any ( [ filename . endswith ( ext ) for ext in suffixes ] ) or any ( [ filename . startswith ( pre ) for pre in prefixes ] ) ) if delete : path = join ( self . cache_directory_path , filename ) logger . info ( "Deleting %s" , path ) remove ( path )
Deletes any cached files matching the prefixes or suffixes given
10,536
def to_dict ( self ) : return dict ( reference_name = self . reference_name , annotation_name = self . annotation_name , annotation_version = self . annotation_version , gtf_path_or_url = self . _gtf_path_or_url , transcript_fasta_paths_or_urls = self . _transcript_fasta_paths_or_urls , protein_fasta_paths_or_urls = self . _protein_fasta_paths_or_urls , decompress_on_download = self . decompress_on_download , copy_local_files_to_cache = self . copy_local_files_to_cache , cache_directory_path = self . cache_directory_path )
Returns a dictionary of the essential fields of this Genome .
10,537
def _init_lazy_fields ( self ) : self . gtf_path = None self . _protein_sequences = None self . _transcript_sequences = None self . _db = None self . protein_fasta_paths = None self . transcript_fasta_paths = None self . _genes = { } self . _transcripts = { } self . _exons = { }
Member data that gets loaded or constructed on demand
10,538
def _get_cached_path ( self , field_name , path_or_url , download_if_missing = False , overwrite = False ) : if len ( field_name ) == 0 : raise ValueError ( "Expected non-empty field name" ) if len ( path_or_url ) == 0 : raise ValueError ( "Expected non-empty path_or_url" ) return self . download_cache . local_path_or_install_error ( field_name = field_name , path_or_url = path_or_url , download_if_missing = download_if_missing , overwrite = overwrite )
Get the local path for a possibly remote file invoking either a download or install error message if it s missing .
10,539
def download ( self , overwrite = False ) : self . _set_local_paths ( download_if_missing = True , overwrite = overwrite )
Download data files needed by this Genome instance .
10,540
def index ( self , overwrite = False ) : if self . requires_gtf : self . db . connect_or_create ( overwrite = overwrite ) if self . requires_transcript_fasta : self . transcript_sequences . index ( overwrite = overwrite ) if self . requires_protein_fasta : self . protein_sequences . index ( overwrite = overwrite )
Assuming that all necessary data for this Genome has been downloaded generate the GTF database and save efficient representation of FASTA sequence files .
10,541
def install_string ( self ) : args = [ "--reference-name" , self . reference_name , "--annotation-name" , self . annotation_name ] if self . annotation_version : args . extend ( [ "--annotation-version" , str ( self . annotation_version ) ] ) if self . requires_gtf : args . append ( "--gtf" ) args . append ( "\"%s\"" % self . _gtf_path_or_url ) if self . requires_protein_fasta : args += [ "--protein-fasta \"%s\"" % path for path in self . _protein_fasta_paths_or_urls ] if self . requires_transcript_fasta : args += [ "--transcript-fasta \"%s\"" % path for path in self . _transcript_fasta_paths_or_urls ] return "pyensembl install %s" % " " . join ( args )
Add every missing file to the install string shown to the user in an error message .
10,542
def clear_cache ( self ) : for maybe_fn in self . __dict__ . values ( ) : if hasattr ( maybe_fn , "clear_cache" ) : maybe_fn . clear_cache ( )
Clear any in - memory cached values and short - lived on - disk materializations from MemoryCache
10,543
def delete_index_files ( self ) : self . clear_cache ( ) db_path = self . db . local_db_path ( ) if exists ( db_path ) : remove ( db_path )
Delete all data aside from source GTF and FASTA files
10,544
def _all_feature_values ( self , column , feature , distinct = True , contig = None , strand = None ) : return self . db . query_feature_values ( column = column , feature = feature , distinct = distinct , contig = contig , strand = strand )
Cached lookup of all values for a particular feature property from the database caches repeated queries in memory and stores them as a CSV .
10,545
def gene_by_id ( self , gene_id ) : if gene_id not in self . _genes : field_names = [ "seqname" , "start" , "end" , "strand" , ] optional_field_names = [ "gene_name" , "gene_biotype" , ] field_names . extend ( [ name for name in optional_field_names if self . db . column_exists ( "gene" , name ) ] ) result = self . db . query_one ( field_names , filter_column = "gene_id" , filter_value = gene_id , feature = "gene" ) if not result : raise ValueError ( "Gene not found: %s" % ( gene_id , ) ) gene_name , gene_biotype = None , None assert len ( result ) >= 4 and len ( result ) <= 6 , "Result is not the expected length: %d" % len ( result ) contig , start , end , strand = result [ : 4 ] if len ( result ) == 5 : if "gene_name" in field_names : gene_name = result [ 4 ] else : gene_biotype = result [ 4 ] elif len ( result ) == 6 : gene_name , gene_biotype = result [ 4 : ] self . _genes [ gene_id ] = Gene ( gene_id = gene_id , gene_name = gene_name , contig = contig , start = start , end = end , strand = strand , biotype = gene_biotype , genome = self ) return self . _genes [ gene_id ]
Construct a Gene object for the given gene ID .
10,546
def gene_by_protein_id ( self , protein_id ) : gene_id = self . gene_id_of_protein_id ( protein_id ) return self . gene_by_id ( gene_id )
Get the gene ID associated with the given protein ID return its Gene object
10,547
def gene_id_of_protein_id ( self , protein_id ) : results = self . _query_gene_ids ( "protein_id" , protein_id , feature = "CDS" ) if len ( results ) == 0 : raise ValueError ( "Protein ID not found: %s" % protein_id ) assert len ( results ) == 1 , ( "Should have only one gene ID for a given protein ID, " "but found %d: %s" % ( len ( results ) , results ) ) return results [ 0 ]
What is the gene ID associated with a given protein ID?
10,548
def transcripts ( self , contig = None , strand = None ) : transcript_ids = self . transcript_ids ( contig = contig , strand = strand ) return [ self . transcript_by_id ( transcript_id ) for transcript_id in transcript_ids ]
Construct Transcript object for every transcript entry in the database . Optionally restrict to a particular chromosome using the contig argument .
10,549
def transcript_by_id ( self , transcript_id ) : if transcript_id not in self . _transcripts : optional_field_names = [ "transcript_name" , "transcript_biotype" , "transcript_support_level" , ] field_names = [ "seqname" , "start" , "end" , "strand" , "gene_id" , ] field_names . extend ( [ name for name in optional_field_names if self . db . column_exists ( "transcript" , name ) ] ) result = self . db . query_one ( select_column_names = field_names , filter_column = "transcript_id" , filter_value = transcript_id , feature = "transcript" , distinct = True ) if not result : raise ValueError ( "Transcript not found: %s" % ( transcript_id , ) ) transcript_name , transcript_biotype , tsl = None , None , None assert 5 <= len ( result ) <= 5 + len ( optional_field_names ) , "Result is not the expected length: %d" % len ( result ) contig , start , end , strand , gene_id = result [ : 5 ] if len ( result ) > 5 : extra_field_names = [ f for f in optional_field_names if f in field_names ] extra_data = dict ( zip ( extra_field_names , result [ 5 : ] ) ) transcript_name = extra_data . get ( "transcript_name" ) transcript_biotype = extra_data . get ( "transcript_biotype" ) tsl = extra_data . get ( "transcript_support_level" ) if not tsl or tsl == 'NA' : tsl = None else : tsl = int ( tsl ) self . _transcripts [ transcript_id ] = Transcript ( transcript_id = transcript_id , transcript_name = transcript_name , contig = contig , start = start , end = end , strand = strand , biotype = transcript_biotype , gene_id = gene_id , genome = self , support_level = tsl ) return self . _transcripts [ transcript_id ]
Construct Transcript object with given transcript ID
10,550
def transcript_id_of_protein_id ( self , protein_id ) : results = self . _query_transcript_ids ( "protein_id" , protein_id , feature = "CDS" ) if len ( results ) == 0 : raise ValueError ( "Protein ID not found: %s" % protein_id ) assert len ( results ) == 1 , ( "Should have only one transcript ID for a given protein ID, " "but found %d: %s" % ( len ( results ) , results ) ) return results [ 0 ]
What is the transcript ID associated with a given protein ID?
10,551
def exons ( self , contig = None , strand = None ) : exon_ids = self . exon_ids ( contig = contig , strand = strand ) return [ self . exon_by_id ( exon_id ) for exon_id in exon_ids ]
Create exon object for all exons in the database optionally restrict to a particular chromosome using the contig argument .
10,552
def exon_by_id ( self , exon_id ) : if exon_id not in self . _exons : field_names = [ "seqname" , "start" , "end" , "strand" , "gene_name" , "gene_id" , ] contig , start , end , strand , gene_name , gene_id = self . db . query_one ( select_column_names = field_names , filter_column = "exon_id" , filter_value = exon_id , feature = "exon" , distinct = True ) self . _exons [ exon_id ] = Exon ( exon_id = exon_id , contig = contig , start = start , end = end , strand = strand , gene_name = gene_name , gene_id = gene_id ) return self . _exons [ exon_id ]
Construct an Exon object from its ID by looking up the exon s properties in the given Database .
10,553
def check_release_number ( release ) : try : release = int ( release ) except : raise ValueError ( "Invalid Ensembl release: %s" % release ) if release < MIN_ENSEMBL_RELEASE or release > MAX_ENSEMBL_RELEASE : raise ValueError ( "Invalid Ensembl releases %d, must be between %d and %d" % ( release , MIN_ENSEMBL_RELEASE , MAX_ENSEMBL_RELEASE ) ) return release
Check to make sure a release is in the valid range of Ensembl releases .
10,554
def _species_subdir ( ensembl_release , species = "homo_sapiens" , filetype = "gtf" , server = ENSEMBL_FTP_SERVER ) : return SPECIES_SUBDIR_TEMPLATE % { "release" : ensembl_release , "filetype" : filetype , "species" : species , }
Assume ensembl_release has already been normalize by calling function but species might be either a common name or latin name .
10,555
def normalize_release_properties ( ensembl_release , species ) : ensembl_release = check_release_number ( ensembl_release ) if not isinstance ( species , Species ) : species = find_species_by_name ( species ) reference_name = species . which_reference ( ensembl_release ) return ensembl_release , species . latin_name , reference_name
Make sure a given release is valid normalize it to be an integer normalize the species name and get its associated reference .
10,556
def make_gtf_url ( ensembl_release , species , server = ENSEMBL_FTP_SERVER ) : ensembl_release , species , _ = normalize_release_properties ( ensembl_release , species ) subdir = _species_subdir ( ensembl_release , species = species , filetype = "gtf" , server = server ) url_subdir = urllib_parse . urljoin ( server , subdir ) filename = make_gtf_filename ( ensembl_release = ensembl_release , species = species ) return join ( url_subdir , filename )
Returns a URL and a filename which can be joined together .
10,557
def make_fasta_url ( ensembl_release , species , sequence_type , server = ENSEMBL_FTP_SERVER ) : ensembl_release , species , reference_name = normalize_release_properties ( ensembl_release , species ) subdir = _species_subdir ( ensembl_release , species = species , filetype = "fasta" , server = server ) server_subdir = urllib_parse . urljoin ( server , subdir ) server_sequence_subdir = join ( server_subdir , sequence_type ) filename = make_fasta_filename ( ensembl_release = ensembl_release , species = species , sequence_type = sequence_type ) return join ( server_sequence_subdir , filename )
Construct URL to FASTA file with cDNA transcript or protein sequences
10,558
def _transcript_feature_positions ( self , feature ) : ranges = self . _transcript_feature_position_ranges ( feature , required = True ) results = [ ] for ( start , end ) in ranges : for position in range ( start , end + 1 ) : assert position not in results , "Repeated position %d for %s" % ( position , feature ) results . append ( position ) return results
Get unique positions for feature raise an error if feature is absent .
10,559
def spliced_offset ( self , position ) : assert type ( position ) == int , "Position argument must be an integer, got %s : %s" % ( position , type ( position ) ) if position < self . start or position > self . end : raise ValueError ( "Invalid position: %d (must be between %d and %d)" % ( position , self . start , self . end ) ) unspliced_offset = self . offset ( position ) total_spliced_offset = 0 for exon in self . exons : exon_unspliced_start , exon_unspliced_end = self . offset_range ( exon . start , exon . end ) if exon_unspliced_start <= unspliced_offset <= exon_unspliced_end : exon_offset = unspliced_offset - exon_unspliced_start return total_spliced_offset + exon_offset else : exon_length = len ( exon ) total_spliced_offset += exon_length raise ValueError ( "Couldn't find position %d on any exon of %s" % ( position , self . id ) )
Convert from an absolute chromosomal position to the offset into this transcript s spliced mRNA .
10,560
def _contiguous_offsets ( self , offsets ) : offsets . sort ( ) for i in range ( len ( offsets ) - 1 ) : assert offsets [ i ] + 1 == offsets [ i + 1 ] , "Offsets not contiguous: %s" % ( offsets , ) return offsets
Sorts the input list of integer offsets ensures that values are contiguous .
10,561
def start_codon_spliced_offsets ( self ) : offsets = [ self . spliced_offset ( position ) for position in self . start_codon_positions ] return self . _contiguous_offsets ( offsets )
Offsets from start of spliced mRNA transcript of nucleotides in start codon .
10,562
def stop_codon_spliced_offsets ( self ) : offsets = [ self . spliced_offset ( position ) for position in self . stop_codon_positions ] return self . _contiguous_offsets ( offsets )
Offsets from start of spliced mRNA transcript of nucleotides in stop codon .
10,563
def complete ( self ) : return ( self . contains_start_codon and self . contains_stop_codon and self . coding_sequence is not None and len ( self . coding_sequence ) % 3 == 0 )
Consider a transcript complete if it has start and stop codons and a coding sequence whose length is divisible by 3
10,564
def _all_possible_indices ( self , column_names ) : candidate_column_groups = [ [ 'seqname' , 'start' , 'end' ] , [ 'gene_name' ] , [ 'gene_id' ] , [ 'transcript_id' ] , [ 'transcript_name' ] , [ 'exon_id' ] , [ 'protein_id' ] , [ 'ccds_id' ] , ] indices = [ ] column_set = set ( column_names ) for column_group in candidate_column_groups : skip = False for column_name in column_group : if column_name not in column_set : logger . info ( "Skipping database index for {%s}" , ", " . join ( column_group ) ) skip = True if skip : continue indices . append ( column_group ) return indices
Create list of tuples containing all possible index groups we might want to create over tables in this database .
10,565
def connection ( self ) : connection = self . _get_connection ( ) if connection : return connection else : message = "GTF database needs to be created" if self . install_string : message += ", run: %s" % self . install_string raise ValueError ( message )
Get a connection to the database or raise an exception
10,566
def connect_or_create ( self , overwrite = False ) : connection = self . _get_connection ( ) if connection : return connection else : return self . create ( overwrite = overwrite )
Return a connection to the database if it exists otherwise create it . Overwrite the existing database if overwrite is True .
10,567
def run_sql_query ( self , sql , required = False , query_params = [ ] ) : try : cursor = self . connection . execute ( sql , query_params ) except sqlite3 . OperationalError as e : error_message = e . message if hasattr ( e , 'message' ) else str ( e ) logger . warn ( "Encountered error \"%s\" from query \"%s\" with parameters %s" , error_message , sql , query_params ) raise results = cursor . fetchall ( ) if required and not results : raise ValueError ( "No results found for query:\n%s\nwith parameters: %s" % ( sql , query_params ) ) return results
Given an arbitrary SQL query run it against the database and return the results .
10,568
def query_loci ( self , filter_column , filter_value , feature ) : result_tuples = self . query ( select_column_names = [ "seqname" , "start" , "end" , "strand" ] , filter_column = filter_column , filter_value = filter_value , feature = feature , distinct = True , required = True ) return [ Locus ( contig , start , end , strand ) for ( contig , start , end , strand ) in result_tuples ]
Query for loci satisfying a given filter and feature type .
10,569
def query_locus ( self , filter_column , filter_value , feature ) : loci = self . query_loci ( filter_column = filter_column , filter_value = filter_value , feature = feature ) if len ( loci ) == 0 : raise ValueError ( "Couldn't find locus for %s with %s = %s" % ( feature , filter_column , filter_value ) ) elif len ( loci ) > 1 : raise ValueError ( "Too many loci for %s with %s = %s: %s" % ( feature , filter_column , filter_value , loci ) ) return loci [ 0 ]
Query for unique locus raises error if missing or more than one locus in the database .
10,570
def _load_gtf_as_dataframe ( self , usecols = None , features = None ) : logger . info ( "Reading GTF from %s" , self . gtf_path ) df = read_gtf ( self . gtf_path , column_converters = { "seqname" : normalize_chromosome , "strand" : normalize_strand , } , infer_biotype_column = True , usecols = usecols , features = features ) column_names = set ( df . keys ( ) ) expect_gene_feature = features is None or "gene" in features expect_transcript_feature = features is None or "transcript" in features observed_features = set ( df [ "feature" ] ) if expect_gene_feature and "gene" not in observed_features : logger . info ( "Creating missing gene features..." ) df = create_missing_features ( dataframe = df , unique_keys = { "gene" : "gene_id" } , extra_columns = { "gene" : { "gene_name" , "gene_biotype" } . intersection ( column_names ) , } , missing_value = "" ) logger . info ( "Done." ) if expect_transcript_feature and "transcript" not in observed_features : logger . info ( "Creating missing transcript features..." ) df = create_missing_features ( dataframe = df , unique_keys = { "transcript" : "transcript_id" } , extra_columns = { "transcript" : { "gene_id" , "gene_name" , "gene_biotype" , "transcript_name" , "transcript_biotype" , "protein_id" , } . intersection ( column_names ) } , missing_value = "" ) logger . info ( "Done." ) return df
Parse this genome source s GTF file and load it as a Pandas DataFrame
10,571
def transcripts ( self ) : transcript_id_results = self . db . query ( select_column_names = [ 'transcript_id' ] , filter_column = 'gene_id' , filter_value = self . id , feature = 'transcript' , distinct = False , required = False ) return [ self . genome . transcript_by_id ( result [ 0 ] ) for result in transcript_id_results ]
Property which dynamically construct transcript objects for all transcript IDs associated with this gene .
10,572
def clone_bs4_elem ( el ) : if isinstance ( el , NavigableString ) : return type ( el ) ( el ) copy = Tag ( None , el . builder , el . name , el . namespace , el . nsprefix ) copy . attrs = dict ( el . attrs ) for attr in ( 'can_be_empty_element' , 'hidden' ) : setattr ( copy , attr , getattr ( el , attr ) ) for child in el . contents : copy . append ( clone_bs4_elem ( child ) ) return copy
Clone a bs4 tag before modifying it .
10,573
def clean_ticker ( ticker ) : pattern = re . compile ( '[\W_]+' ) res = pattern . sub ( '' , ticker . split ( ' ' ) [ 0 ] ) return res . lower ( )
Cleans a ticker for easier use throughout MoneyTree
10,574
def scale ( val , src , dst ) : if val < src [ 0 ] : return dst [ 0 ] if val > src [ 1 ] : return dst [ 1 ] return ( ( val - src [ 0 ] ) / ( src [ 1 ] - src [ 0 ] ) ) * ( dst [ 1 ] - dst [ 0 ] ) + dst [ 0 ]
Scale value from src range to dst range . If value outside bounds it is clipped and set to the low or high bound of dst .
10,575
def as_format ( item , format_str = '.2f' ) : if isinstance ( item , pd . Series ) : return item . map ( lambda x : format ( x , format_str ) ) elif isinstance ( item , pd . DataFrame ) : return item . applymap ( lambda x : format ( x , format_str ) )
Map a format string over a pandas object .
10,576
def to_price_index ( returns , start = 100 ) : return ( returns . replace ( to_replace = np . nan , value = 0 ) + 1 ) . cumprod ( ) * start
Returns a price index given a series of returns .
10,577
def calc_stats ( prices ) : if isinstance ( prices , pd . Series ) : return PerformanceStats ( prices ) elif isinstance ( prices , pd . DataFrame ) : return GroupStats ( * [ prices [ x ] for x in prices . columns ] ) else : raise NotImplementedError ( 'Unsupported type' )
Calculates performance stats of a given object .
10,578
def asfreq_actual ( series , freq , method = 'ffill' , how = 'end' , normalize = False ) : orig = series is_series = False if isinstance ( series , pd . Series ) : is_series = True name = series . name if series . name else 'data' orig = pd . DataFrame ( { name : series } ) t = pd . concat ( [ orig , pd . DataFrame ( { 'dt' : orig . index . values } , index = orig . index . values ) ] , axis = 1 ) dts = t . asfreq ( freq = freq , method = method , how = how , normalize = normalize ) [ 'dt' ] res = orig . loc [ dts . values ] if is_series : return res [ name ] else : return res
Similar to pandas asfreq but keeps the actual dates . For example if last data point in Jan is on the 29th that date will be used instead of the 31st .
10,579
def calc_inv_vol_weights ( returns ) : vol = np . divide ( 1. , np . std ( returns , ddof = 1 ) ) vol [ np . isinf ( vol ) ] = np . NaN volsum = vol . sum ( ) return np . divide ( vol , volsum )
Calculates weights proportional to inverse volatility of each column .
10,580
def calc_mean_var_weights ( returns , weight_bounds = ( 0. , 1. ) , rf = 0. , covar_method = 'ledoit-wolf' , options = None ) : def fitness ( weights , exp_rets , covar , rf ) : mean = sum ( exp_rets * weights ) var = np . dot ( np . dot ( weights , covar ) , weights ) util = ( mean - rf ) / np . sqrt ( var ) return - util n = len ( returns . columns ) exp_rets = returns . mean ( ) if covar_method == 'ledoit-wolf' : covar = sklearn . covariance . ledoit_wolf ( returns ) [ 0 ] elif covar_method == 'standard' : covar = returns . cov ( ) else : raise NotImplementedError ( 'covar_method not implemented' ) weights = np . ones ( [ n ] ) / n bounds = [ weight_bounds for i in range ( n ) ] constraints = ( { 'type' : 'eq' , 'fun' : lambda W : sum ( W ) - 1. } ) optimized = minimize ( fitness , weights , ( exp_rets , covar , rf ) , method = 'SLSQP' , constraints = constraints , bounds = bounds , options = options ) if not optimized . success : raise Exception ( optimized . message ) return pd . Series ( { returns . columns [ i ] : optimized . x [ i ] for i in range ( n ) } )
Calculates the mean - variance weights given a DataFrame of returns .
10,581
def get_num_days_required ( offset , period = 'd' , perc_required = 0.90 ) : x = pd . to_datetime ( '2010-01-01' ) delta = x - ( x - offset ) days = delta . days * 0.69 if period == 'd' : req = days * perc_required elif period == 'm' : req = ( days / 20 ) * perc_required elif period == 'y' : req = ( days / 252 ) * perc_required else : raise NotImplementedError ( 'period not supported. Supported periods are d, m, y' ) return req
Estimates the number of days required to assume that data is OK .
10,582
def calc_clusters ( returns , n = None , plot = False ) : corr = returns . corr ( ) diss = 1 - corr mds = sklearn . manifold . MDS ( dissimilarity = 'precomputed' ) xy = mds . fit_transform ( diss ) def routine ( k ) : km = sklearn . cluster . KMeans ( n_clusters = k ) km_fit = km . fit ( xy ) labels = km_fit . labels_ centers = km_fit . cluster_centers_ mappings = dict ( zip ( returns . columns , labels ) ) totss = 0 withinss = 0 avg = np . array ( [ np . mean ( xy [ : , 0 ] ) , np . mean ( xy [ : , 1 ] ) ] ) for idx , lbl in enumerate ( labels ) : withinss += sum ( ( xy [ idx ] - centers [ lbl ] ) ** 2 ) totss += sum ( ( xy [ idx ] - avg ) ** 2 ) pvar_expl = 1.0 - withinss / totss return mappings , pvar_expl , labels if n : result = routine ( n ) else : n = len ( returns . columns ) n1 = int ( np . ceil ( n * 0.6666666666 ) ) for i in range ( 2 , n1 + 1 ) : result = routine ( i ) if result [ 1 ] > 0.9 : break if plot : fig , ax = plt . subplots ( ) ax . scatter ( xy [ : , 0 ] , xy [ : , 1 ] , c = result [ 2 ] , s = 90 ) for i , txt in enumerate ( returns . columns ) : ax . annotate ( txt , ( xy [ i , 0 ] , xy [ i , 1 ] ) , size = 14 ) tmp = result [ 0 ] inv_map = { } for k , v in iteritems ( tmp ) : inv_map [ v ] = inv_map . get ( v , [ ] ) inv_map [ v ] . append ( k ) return inv_map
Calculates the clusters based on k - means clustering .
10,583
def limit_weights ( weights , limit = 0.1 ) : if 1.0 / limit > len ( weights ) : raise ValueError ( 'invalid limit -> 1 / limit must be <= len(weights)' ) if isinstance ( weights , dict ) : weights = pd . Series ( weights ) if np . round ( weights . sum ( ) , 1 ) != 1.0 : raise ValueError ( 'Expecting weights (that sum to 1) - sum is %s' % weights . sum ( ) ) res = np . round ( weights . copy ( ) , 4 ) to_rebalance = ( res [ res > limit ] - limit ) . sum ( ) ok = res [ res < limit ] ok += ( ok / ok . sum ( ) ) * to_rebalance res [ res > limit ] = limit res [ res < limit ] = ok if any ( x > limit for x in res ) : return limit_weights ( res , limit = limit ) return res
Limits weights and redistributes excedent amount proportionally .
10,584
def random_weights ( n , bounds = ( 0. , 1. ) , total = 1.0 ) : low = bounds [ 0 ] high = bounds [ 1 ] if high < low : raise ValueError ( 'Higher bound must be greater or ' 'equal to lower bound' ) if n * high < total or n * low > total : raise ValueError ( 'solution not possible with given n and bounds' ) w = [ 0 ] * n tgt = - float ( total ) for i in range ( n ) : rn = n - i - 1 rhigh = rn * high rlow = rn * low lowb = max ( - rhigh - tgt , low ) highb = min ( - rlow - tgt , high ) rw = random . uniform ( lowb , highb ) w [ i ] = rw tgt += rw random . shuffle ( w ) return w
Generate pseudo - random weights .
10,585
def plot_heatmap ( data , title = 'Heatmap' , show_legend = True , show_labels = True , label_fmt = '.2f' , vmin = None , vmax = None , figsize = None , label_color = 'w' , cmap = 'RdBu' , ** kwargs ) : fig , ax = plt . subplots ( figsize = figsize ) heatmap = ax . pcolor ( data , vmin = vmin , vmax = vmax , cmap = cmap ) ax . invert_yaxis ( ) if title is not None : plt . title ( title ) if show_legend : fig . colorbar ( heatmap ) if show_labels : vals = data . values for x in range ( data . shape [ 0 ] ) : for y in range ( data . shape [ 1 ] ) : plt . text ( x + 0.5 , y + 0.5 , format ( vals [ y , x ] , label_fmt ) , horizontalalignment = 'center' , verticalalignment = 'center' , color = label_color ) plt . yticks ( np . arange ( 0.5 , len ( data . index ) , 1 ) , data . index ) plt . xticks ( np . arange ( 0.5 , len ( data . columns ) , 1 ) , data . columns ) return plt
Plot a heatmap using matplotlib s pcolor .
10,586
def rollapply ( data , window , fn ) : res = data . copy ( ) res [ : ] = np . nan n = len ( data ) if window > n : return res for i in range ( window - 1 , n ) : res . iloc [ i ] = fn ( data . iloc [ i - window + 1 : i + 1 ] ) return res
Apply a function fn over a rolling window of size window .
10,587
def _winsorize_wrapper ( x , limits ) : if isinstance ( x , pd . Series ) : if x . count ( ) == 0 : return x notnanx = ~ np . isnan ( x ) x [ notnanx ] = scipy . stats . mstats . winsorize ( x [ notnanx ] , limits = limits ) return x else : return scipy . stats . mstats . winsorize ( x , limits = limits )
Wraps scipy winsorize function to drop na s
10,588
def to_excess_returns ( returns , rf , nperiods = None ) : if type ( rf ) is float and nperiods is not None : _rf = deannualize ( rf , nperiods ) else : _rf = rf return returns - _rf
Given a series of returns it will return the excess returns over rf .
10,589
def resample_returns ( returns , func , seed = 0 , num_trials = 100 ) : if type ( returns ) is pd . Series : stats = pd . Series ( index = range ( num_trials ) ) elif type ( returns ) is pd . DataFrame : stats = pd . DataFrame ( index = range ( num_trials ) , columns = returns . columns ) else : raise ( TypeError ( "returns needs to be a Series or DataFrame!" ) ) n = returns . shape [ 0 ] for i in range ( num_trials ) : random_indices = resample ( returns . index , n_samples = n , random_state = seed + i ) stats . loc [ i ] = func ( returns . loc [ random_indices ] ) return stats
Resample the returns and calculate any statistic on every new sample .
10,590
def set_riskfree_rate ( self , rf ) : self . rf = rf self . _update ( self . prices )
Set annual risk - free rate property and calculate properly annualized monthly and daily rates . Then performance stats are recalculated . Affects only this instance of the PerformanceStats .
10,591
def display_monthly_returns ( self ) : data = [ [ 'Year' , 'Jan' , 'Feb' , 'Mar' , 'Apr' , 'May' , 'Jun' , 'Jul' , 'Aug' , 'Sep' , 'Oct' , 'Nov' , 'Dec' , 'YTD' ] ] for k in self . return_table . index : r = self . return_table . loc [ k ] . values data . append ( [ k ] + [ fmtpn ( x ) for x in r ] ) print ( tabulate ( data , headers = 'firstrow' ) )
Display a table containing monthly returns and ytd returns for every year in range .
10,592
def plot_histogram ( self , freq = None , figsize = ( 15 , 5 ) , title = None , bins = 20 , ** kwargs ) : if title is None : title = self . _get_default_plot_title ( self . name , freq , 'Return Histogram' ) ser = self . _get_series ( freq ) . to_returns ( ) . dropna ( ) plt . figure ( figsize = figsize ) ax = ser . hist ( bins = bins , figsize = figsize , normed = True , ** kwargs ) ax . set_title ( title ) plt . axvline ( 0 , linewidth = 4 ) return ser . plot ( kind = 'kde' )
Plots a histogram of returns given a return frequency .
10,593
def set_date_range ( self , start = None , end = None ) : start = self . _start if start is None else pd . to_datetime ( start ) end = self . _end if end is None else pd . to_datetime ( end ) self . _update ( self . _prices . loc [ start : end ] )
Update date range of stats charts etc . If None then the original date range is used . So to reset to the original range just call with no args .
10,594
def display ( self ) : data = [ ] first_row = [ 'Stat' ] first_row . extend ( self . _names ) data . append ( first_row ) stats = self . _stats ( ) for stat in stats : k , n , f = stat if k is None : row = [ '' ] * len ( data [ 0 ] ) data . append ( row ) continue row = [ n ] for key in self . _names : raw = getattr ( self [ key ] , k ) if k == 'rf' and not type ( raw ) == float : row . append ( np . nan ) elif f is None : row . append ( raw ) elif f == 'p' : row . append ( fmtp ( raw ) ) elif f == 'n' : row . append ( fmtn ( raw ) ) elif f == 'dt' : row . append ( raw . strftime ( '%Y-%m-%d' ) ) else : raise NotImplementedError ( 'unsupported format %s' % f ) data . append ( row ) print ( tabulate ( data , headers = 'firstrow' ) )
Display summary stats table .
10,595
def display_lookback_returns ( self ) : return self . lookback_returns . apply ( lambda x : x . map ( '{:,.2%}' . format ) , axis = 1 )
Displays the current lookback returns for each series .
10,596
def plot ( self , freq = None , figsize = ( 15 , 5 ) , title = None , logy = False , ** kwargs ) : if title is None : title = self . _get_default_plot_title ( freq , 'Equity Progression' ) ser = self . _get_series ( freq ) . rebase ( ) return ser . plot ( figsize = figsize , logy = logy , title = title , ** kwargs )
Helper function for plotting the series .
10,597
def plot_scatter_matrix ( self , freq = None , title = None , figsize = ( 10 , 10 ) , ** kwargs ) : if title is None : title = self . _get_default_plot_title ( freq , 'Return Scatter Matrix' ) plt . figure ( ) ser = self . _get_series ( freq ) . to_returns ( ) . dropna ( ) pd . scatter_matrix ( ser , figsize = figsize , ** kwargs ) return plt . suptitle ( title )
Wrapper around pandas scatter_matrix .
10,598
def plot_histograms ( self , freq = None , title = None , figsize = ( 10 , 10 ) , ** kwargs ) : if title is None : title = self . _get_default_plot_title ( freq , 'Return Histogram Matrix' ) plt . figure ( ) ser = self . _get_series ( freq ) . to_returns ( ) . dropna ( ) ser . hist ( figsize = figsize , ** kwargs ) return plt . suptitle ( title )
Wrapper around pandas hist .
10,599
def plot_correlation ( self , freq = None , title = None , figsize = ( 12 , 6 ) , ** kwargs ) : if title is None : title = self . _get_default_plot_title ( freq , 'Return Correlation Matrix' ) rets = self . _get_series ( freq ) . to_returns ( ) . dropna ( ) return rets . plot_corr_heatmap ( title = title , figsize = figsize , ** kwargs )
Utility function to plot correlations .