| idx (int64, 0-63k) | question (string, 53-5.28k chars) | target (string, 5-805 chars) |
|---|---|---|
2,800
|
def _load_permissions ( self ) : result = _P ( needs = set ( ) , excludes = set ( ) ) if not self . allow_by_default : result . needs . update ( self . explicit_needs ) for explicit_need in self . explicit_needs : if explicit_need . method == 'action' : action = current_access . get_action_cache ( self . _cache_key ( explicit_need ) ) if action is None : action = _P ( needs = set ( ) , excludes = set ( ) ) actionsusers = ActionUsers . query_by_action ( explicit_need ) . all ( ) actionsroles = ActionRoles . query_by_action ( explicit_need ) . join ( ActionRoles . role ) . all ( ) actionssystem = ActionSystemRoles . query_by_action ( explicit_need ) . all ( ) for db_action in chain ( actionsusers , actionsroles , actionssystem ) : if db_action . exclude : action . excludes . add ( db_action . need ) else : action . needs . add ( db_action . need ) current_access . set_action_cache ( self . _cache_key ( explicit_need ) , action ) result . update ( action ) elif self . allow_by_default : result . needs . add ( explicit_need ) self . _permissions = result
|
Load permissions associated with actions.
|
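The loader above splits every DB-backed rule of an action into allowed needs and exclusions before caching the result. A minimal sketch of that split, with a hypothetical `_P` namedtuple and plain dicts standing in for the `ActionUsers`/`ActionRoles` rows:

```python
from collections import namedtuple

# Hypothetical stand-in for the _P permission container used above.
_P = namedtuple('_P', ['needs', 'excludes'])

def collect_action(db_rows):
    """Split the DB rows of one action into allowed needs and exclusions."""
    action = _P(needs=set(), excludes=set())
    for row in db_rows:
        (action.excludes if row['exclude'] else action.needs).add(row['need'])
    return action

rows = [{'need': 'user:1', 'exclude': False},
        {'need': 'role:admin', 'exclude': True}]
print(collect_action(rows))  # _P(needs={'user:1'}, excludes={'role:admin'})
```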
2,801
|
def lazy_result ( f ) : @ wraps ( f ) def decorated ( ctx , param , value ) : return LocalProxy ( lambda : f ( ctx , param , value ) ) return decorated
|
Decorate a function to return a lazy LocalProxy.
|
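A usage sketch of the decorator above, assuming werkzeug (which provides `LocalProxy`) is installed; `resolve` is a hypothetical click-style callback that is only evaluated when the proxy is dereferenced:

```python
from functools import wraps
from werkzeug.local import LocalProxy

def lazy_result(f):
    """Defer a (ctx, param, value) callback until the proxy is used."""
    @wraps(f)
    def decorated(ctx, param, value):
        return LocalProxy(lambda: f(ctx, param, value))
    return decorated

@lazy_result
def resolve(ctx, param, value):          # hypothetical callback
    print('resolving', value)            # runs when the proxy is dereferenced
    return value.upper()

proxy = resolve(None, None, 'books')     # nothing printed yet
print(str(proxy))                        # prints 'resolving books', then 'BOOKS'
```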
2,802
|
def process_action ( ctx , param , value ) : actions = current_app . extensions [ 'invenio-access' ] . actions if value not in actions : raise click . BadParameter ( 'Action "%s" is not registered.' , value ) return actions [ value ]
|
Return an action if it exists.
|
2,803
|
def process_email ( ctx , param , value ) : user = User . query . filter ( User . email == value ) . first ( ) if not user : raise click . BadParameter ( 'User with email \'%s\' not found.' , value ) return user
|
Return a user if it exists.
|
2,804
|
def process_role ( ctx , param , value ) : role = Role . query . filter ( Role . name == value ) . first ( ) if not role : raise click . BadParameter ( 'Role with name \'%s\' not found.' , value ) return role
|
Return a role if it exists .
|
2,805
|
def allow_user ( user ) : def processor ( action , argument ) : db . session . add ( ActionUsers . allow ( action , argument = argument , user_id = user . id ) ) return processor
|
Allow a user identified by an email address .
|
2,806
|
def allow_role ( role ) : def processor ( action , argument ) : db . session . add ( ActionRoles . allow ( action , argument = argument , role_id = role . id ) ) return processor
|
Allow a role identified by its name.
|
2,807
|
def process_allow_action ( processors , action , argument ) : for processor in processors : processor ( action , argument ) db . session . commit ( )
|
Process allow action .
|
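The `allow_*`/`deny_*` helpers above are closure factories; `process_allow_action` simply applies every returned processor and commits once. A self-contained sketch of that pattern, with a plain list standing in for `db.session`:

```python
# The fake_session list stands in for db.session; each factory returns a
# processor(action, argument) closure, applied in one pass before "commit".
fake_session = []

def allow_user(user_id):
    def processor(action, argument):
        fake_session.append(('allow', action, argument, 'user', user_id))
    return processor

def allow_role(role_id):
    def processor(action, argument):
        fake_session.append(('allow', action, argument, 'role', role_id))
    return processor

def process_allow_action(processors, action, argument):
    for processor in processors:
        processor(action, argument)
    print('commit', fake_session)   # db.session.commit() in the real code

process_allow_action([allow_user(1), allow_role(7)], 'admin-access', None)
```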
2,808
|
def deny_user ( user ) : def processor ( action , argument ) : db . session . add ( ActionUsers . deny ( action , argument = argument , user_id = user . id ) ) return processor
|
Deny a user identified by an email address .
|
2,809
|
def deny_role ( role ) : def processor ( action , argument ) : db . session . add ( ActionRoles . deny ( action , argument = argument , role_id = role . id ) ) return processor
|
Deny a role identified by its name.
|
2,810
|
def process_deny_action ( processors , action , argument ) : for processor in processors : processor ( action , argument ) db . session . commit ( )
|
Process deny action .
|
2,811
|
def remove_global ( ) : def processor ( action , argument ) : ActionUsers . query_by_action ( action , argument = argument ) . filter ( ActionUsers . user_id . is_ ( None ) ) . delete ( synchronize_session = False ) return processor
|
Remove global action rule .
|
2,812
|
def remove_user ( user ) : def processor ( action , argument ) : ActionUsers . query_by_action ( action , argument = argument ) . filter ( ActionUsers . user_id == user . id ) . delete ( synchronize_session = False ) return processor
|
Remove an action for a user.
|
2,813
|
def remove_role ( role ) : def processor ( action , argument ) : ActionRoles . query_by_action ( action , argument = argument ) . filter ( ActionRoles . role_id == role . id ) . delete ( synchronize_session = False ) return processor
|
Remove an action for a role.
|
2,814
|
def process_remove_action ( processors , action , argument ) : for processor in processors : processor ( action , argument ) db . session . commit ( )
|
Process action removals .
|
2,815
|
def list_actions ( ) : for name , action in _current_actions . items ( ) : click . echo ( '{0}:{1}' . format ( name , '*' if hasattr ( action , 'argument' ) else '' ) )
|
List all registered actions .
|
2,816
|
def show_actions ( email , role ) : if email : actions = ActionUsers . query . join ( ActionUsers . user ) . filter ( User . email . in_ ( email ) ) . all ( ) for action in actions : click . secho ( 'user:{0}:{1}:{2}:{3}' . format ( action . user . email , action . action , '' if action . argument is None else action . argument , 'deny' if action . exclude else 'allow' , ) , fg = 'red' if action . exclude else 'green' ) if role : actions = ActionRoles . query . filter ( Role . name . in_ ( role ) ) . join ( ActionRoles . role ) . all ( ) for action in actions : click . secho ( 'role:{0}:{1}:{2}:{3}' . format ( action . role . name , action . action , '' if action . argument is None else action . argument , 'deny' if action . exclude else 'allow' , ) , fg = 'red' if action . exclude else 'green' )
|
Show all assigned actions .
|
2,817
|
def run_mhc_gene_assessment ( job , rsem_files , rna_haplotype , univ_options , reports_options ) : return job . addChildJobFn ( assess_mhc_genes , rsem_files [ 'rsem.genes.results' ] , rna_haplotype , univ_options , reports_options ) . rv ( )
|
A wrapper for assess_mhc_genes .
|
2,818
|
def parse_config_file ( job , config_file ) : job . fileStore . logToMaster ( 'Parsing config file' ) config_file = os . path . abspath ( config_file ) if not os . path . exists ( config_file ) : raise ParameterError ( 'The config file was not found at specified location. Please verify ' + 'and retry.' ) sample_set = defaultdict ( ) univ_options = defaultdict ( ) tool_options = defaultdict ( ) with open ( config_file , 'r' ) as conf : for line in conf : line = line . strip ( ) if line . startswith ( '##' ) or len ( line ) == 0 : continue if line . startswith ( 'BEGIN' ) : break for groupname , group_params in tool_specific_param_generator ( job , conf ) : if groupname == 'patient' : if 'patient_id' not in group_params . keys ( ) : raise ParameterError ( 'A patient group is missing the patient_id flag.' ) sample_set [ group_params [ 'patient_id' ] ] = group_params elif groupname == 'Universal_Options' : univ_options = group_params required_options = { 'java_Xmx' , 'output_folder' , 'storage_location' } missing_opts = required_options . difference ( set ( univ_options . keys ( ) ) ) if len ( missing_opts ) > 0 : raise ParameterError ( ' The following options have no arguments in the config ' 'file :\n' + '\n' . join ( missing_opts ) ) if univ_options [ 'sse_key_is_master' ] : assert univ_options [ 'sse_key_is_master' ] in ( 'True' , 'true' , 'False' , 'false' ) univ_options [ 'sse_key_is_master' ] = univ_options [ 'sse_key_is_master' ] in ( 'True' , 'true' ) else : tool_options [ groupname ] = group_params required_tools = { 'cutadapt' , 'bwa' , 'star' , 'phlat' , 'transgene' , 'mut_callers' , 'rsem' , 'mhci' , 'mhcii' , 'snpeff' , 'rank_boost' } missing_tools = required_tools . difference ( set ( tool_options . keys ( ) ) ) if len ( missing_tools ) > 0 : raise ParameterError ( ' The following tools have no arguments in the config file : \n' + '\n' . join ( missing_tools ) ) for patient_id in sample_set . keys ( ) : job . addFollowOnJobFn ( pipeline_launchpad , sample_set [ patient_id ] , univ_options , tool_options ) return None
|
This module will parse the config file within params and set up the variables that will be passed to the various tools in the pipeline.
|
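One step worth calling out in the parser above is the required-option check for the `Universal_Options` group, which is a plain set difference. A minimal sketch (the option values here are made up):

```python
required_options = {'java_Xmx', 'output_folder', 'storage_location'}
univ_options = {'java_Xmx': '20G', 'output_folder': '/out'}   # assumed parsed values

missing_opts = required_options.difference(univ_options)
if missing_opts:
    # parse_config_file raises ParameterError here; a print keeps the sketch runnable
    print('Options with no arguments in the config file:', sorted(missing_opts))
# Options with no arguments in the config file: ['storage_location']
```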
2,819
|
def run_cutadapt ( job , fastqs , univ_options , cutadapt_options ) : job . fileStore . logToMaster ( 'Running cutadapt on %s' % univ_options [ 'patient' ] ) work_dir = job . fileStore . getLocalTempDir ( ) fq_extn = '.gz' if fastqs [ 'gzipped' ] else '' input_files = { 'rna_1.fastq' + fq_extn : fastqs [ 'tumor_rna' ] [ 0 ] , 'rna_2.fastq' + fq_extn : fastqs [ 'tumor_rna' ] [ 1 ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ '-a' , cutadapt_options [ 'a' ] , '-A' , cutadapt_options [ 'A' ] , '-m' , '35' , '-o' , docker_path ( 'rna_cutadapt_1.fastq' ) , '-p' , docker_path ( 'rna_cutadapt_2.fastq' ) , input_files [ 'rna_1.fastq' ] , input_files [ 'rna_2.fastq' ] ] docker_call ( tool = 'cutadapt' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_files = defaultdict ( ) for fastq_file in [ 'rna_cutadapt_1.fastq' , 'rna_cutadapt_2.fastq' ] : output_files [ fastq_file ] = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , fastq_file ] ) ) return output_files
|
This module runs cutadapt on the input RNA fastq files and then calls the RNA aligners .
|
2,820
|
def run_star ( job , fastqs , univ_options , star_options ) : assert star_options [ 'type' ] in ( 'star' , 'starlong' ) job . fileStore . logToMaster ( 'Running STAR on %s' % univ_options [ 'patient' ] ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'rna_cutadapt_1.fastq' : fastqs [ 'rna_cutadapt_1.fastq' ] , 'rna_cutadapt_2.fastq' : fastqs [ 'rna_cutadapt_2.fastq' ] , 'star_index.tar.gz' : star_options [ 'index_tar' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ '--runThreadN' , str ( star_options [ 'n' ] ) , '--genomeDir' , input_files [ 'star_index' ] , '--outFileNamePrefix' , 'rna' , '--readFilesIn' , input_files [ 'rna_cutadapt_1.fastq' ] , input_files [ 'rna_cutadapt_2.fastq' ] , '--outSAMattributes' , 'NH' , 'HI' , 'AS' , 'NM' , 'MD' , '--outSAMtype' , 'BAM' , 'SortedByCoordinate' , '--quantMode' , 'TranscriptomeSAM' , '--outSAMunmapped' , 'Within' ] if star_options [ 'type' ] == 'star' : docker_call ( tool = 'star' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) else : docker_call ( tool = 'starlong' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_files = defaultdict ( ) for bam_file in [ 'rnaAligned.toTranscriptome.out.bam' , 'rnaAligned.sortedByCoord.out.bam' ] : output_files [ bam_file ] = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , bam_file ] ) ) job . fileStore . deleteGlobalFile ( fastqs [ 'rna_cutadapt_1.fastq' ] ) job . fileStore . deleteGlobalFile ( fastqs [ 'rna_cutadapt_2.fastq' ] ) index_star = job . wrapJobFn ( index_bamfile , output_files [ 'rnaAligned.sortedByCoord.out.bam' ] , 'rna' , univ_options , disk = '120G' ) job . addChild ( index_star ) output_files [ 'rnaAligned.sortedByCoord.out.bam' ] = index_star . rv ( ) return output_files
|
This module uses STAR to align the RNA fastqs to the reference
|
2,821
|
def run_bwa ( job , fastqs , sample_type , univ_options , bwa_options ) : job . fileStore . logToMaster ( 'Running bwa on %s:%s' % ( univ_options [ 'patient' ] , sample_type ) ) work_dir = job . fileStore . getLocalTempDir ( ) fq_extn = '.gz' if fastqs [ 'gzipped' ] else '' input_files = { 'dna_1.fastq' + fq_extn : fastqs [ sample_type ] [ 0 ] , 'dna_2.fastq' + fq_extn : fastqs [ sample_type ] [ 1 ] , 'bwa_index.tar.gz' : bwa_options [ 'index_tar' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'mem' , '-t' , str ( bwa_options [ 'n' ] ) , '-v' , '1' , '/' . join ( [ input_files [ 'bwa_index' ] , 'hg19.fa' ] ) , input_files [ 'dna_1.fastq' ] , input_files [ 'dna_2.fastq' ] ] with open ( '' . join ( [ work_dir , '/' , sample_type , '_aligned.sam' ] ) , 'w' ) as samfile : docker_call ( tool = 'bwa' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = samfile ) output_file = job . fileStore . writeGlobalFile ( samfile . name ) samfile_processing = job . wrapJobFn ( bam_conversion , output_file , sample_type , univ_options , disk = '60G' ) job . addChild ( samfile_processing ) return samfile_processing . rv ( )
|
This module aligns the SAMPLE_TYPE dna fastqs to the reference
|
2,822
|
def bam_conversion ( job , samfile , sample_type , univ_options ) : job . fileStore . logToMaster ( 'Running sam2bam on %s:%s' % ( univ_options [ 'patient' ] , sample_type ) ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'aligned.sam' : samfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) bamfile = '/' . join ( [ work_dir , 'aligned.bam' ] ) parameters = [ 'view' , '-bS' , '-o' , docker_path ( bamfile ) , input_files [ 'aligned.sam' ] ] docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_file = job . fileStore . writeGlobalFile ( bamfile ) job . fileStore . deleteGlobalFile ( samfile ) reheader_bam = job . wrapJobFn ( fix_bam_header , output_file , sample_type , univ_options , disk = '60G' ) job . addChild ( reheader_bam ) return reheader_bam . rv ( )
|
This module converts SAMFILE from sam to bam
|
2,823
|
def fix_bam_header ( job , bamfile , sample_type , univ_options ) : job . fileStore . logToMaster ( 'Running reheader on %s:%s' % ( univ_options [ 'patient' ] , sample_type ) ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'aligned.bam' : bamfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'view' , '-H' , input_files [ 'aligned.bam' ] ] with open ( '/' . join ( [ work_dir , 'aligned_bam.header' ] ) , 'w' ) as headerfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = headerfile ) with open ( headerfile . name , 'r' ) as headerfile , open ( '/' . join ( [ work_dir , 'output_bam.header' ] ) , 'w' ) as outheaderfile : for line in headerfile : if line . startswith ( '@PG' ) : line = '\t' . join ( [ x for x in line . strip ( ) . split ( '\t' ) if not x . startswith ( 'CL' ) ] ) print ( line . strip ( ) , file = outheaderfile ) parameters = [ 'reheader' , docker_path ( outheaderfile . name ) , input_files [ 'aligned.bam' ] ] with open ( '/' . join ( [ work_dir , 'aligned_fixPG.bam' ] ) , 'w' ) as fixpg_bamfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = fixpg_bamfile ) output_file = job . fileStore . writeGlobalFile ( fixpg_bamfile . name ) job . fileStore . deleteGlobalFile ( bamfile ) add_rg = job . wrapJobFn ( add_readgroups , output_file , sample_type , univ_options , disk = '60G' ) job . addChild ( add_rg ) return add_rg . rv ( )
|
This module modifies the header in BAMFILE.
|
2,824
|
def run_rsem ( job , star_bams , univ_options , rsem_options ) : job . fileStore . logToMaster ( 'Running rsem index on %s' % univ_options [ 'patient' ] ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'star_transcriptome.bam' : star_bams [ 'rnaAligned.toTranscriptome.out.bam' ] , 'rsem_index.tar.gz' : rsem_options [ 'index_tar' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ '--paired-end' , '-p' , str ( rsem_options [ 'n' ] ) , '--bam' , input_files [ 'star_transcriptome.bam' ] , '--no-bam-output' , '/' . join ( [ input_files [ 'rsem_index' ] , 'hg19' ] ) , 'rsem' ] docker_call ( tool = 'rsem' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_file = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , 'rsem.isoforms.results' ] ) ) return output_file
|
This module will run rsem on the RNA BAM file.
|
2,825
|
def merge_radia ( job , perchrom_rvs ) : job . fileStore . logToMaster ( 'Running merge_radia' ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { filename : jsid for perchrom_files in perchrom_rvs . values ( ) for filename , jsid in perchrom_files . items ( ) } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) chromosomes = [ '' . join ( [ 'chr' , str ( x ) ] ) for x in range ( 1 , 23 ) + [ 'X' , 'Y' ] ] with open ( '/' . join ( [ work_dir , 'radia_calls.vcf' ] ) , 'w' ) as radfile , open ( '/' . join ( [ work_dir , 'radia_filter_passing_calls.vcf' ] ) , 'w' ) as radpassfile : for chrom in chromosomes : with open ( input_files [ '' . join ( [ 'radia_filtered_' , chrom , '.vcf' ] ) ] , 'r' ) as filtradfile : for line in filtradfile : line = line . strip ( ) if line . startswith ( '#' ) : if chrom == 'chr1' : print ( line , file = radfile ) print ( line , file = radpassfile ) continue else : print ( line , file = radfile ) line = line . split ( '\t' ) if line [ 6 ] == 'PASS' and 'MT=GERM' not in line [ 7 ] : print ( '\t' . join ( line ) , file = radpassfile ) with open ( radpassfile . name , 'r' ) as radpassfile , open ( '/' . join ( [ work_dir , 'radia_parsed_filter_passing_calls.vcf' ] ) , 'w' ) as parsedradfile : parse_radia_multi_alt ( radpassfile , parsedradfile ) output_files = defaultdict ( ) for radia_file in [ radfile . name , parsedradfile . name ] : output_files [ os . path . basename ( radia_file ) ] = job . fileStore . writeGlobalFile ( radia_file ) return output_files
|
This module will merge the per-chromosome radia files created by spawn_radia into a genome vcf. It will make 2 vcfs: one for PASSing non-germline calls and one for all calls.
|
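The merge step above keeps the VCF header only from chr1 and routes a body line into the passing file only when its FILTER column is PASS and the INFO field carries no `MT=GERM` tag. A small runnable sketch of that per-line filter:

```python
# Toy VCF lines; headers are kept, and only PASSing, non-germline calls survive.
vcf_lines = [
    '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO',
    'chr1\t100\t.\tA\tT\t50\tPASS\tMT=SOM',
    'chr1\t200\t.\tG\tC\t10\tblq\tMT=SOM',
    'chr1\t300\t.\tC\tG\t40\tPASS\tMT=GERM',
]
passing = []
for line in vcf_lines:
    if line.startswith('#'):
        passing.append(line)
        continue
    fields = line.split('\t')
    if fields[6] == 'PASS' and 'MT=GERM' not in fields[7]:
        passing.append(line)

print(len(passing))   # 2: the header and the chr1:100 call
```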
2,826
|
def run_radia ( job , bams , univ_options , radia_options , chrom ) : job . fileStore . logToMaster ( 'Running radia on %s:%s' % ( univ_options [ 'patient' ] , chrom ) ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'rna.bam' : bams [ 'tumor_rna' ] , 'rna.bam.bai' : bams [ 'tumor_rnai' ] , 'tumor.bam' : bams [ 'tumor_dna' ] , 'tumor.bam.bai' : bams [ 'tumor_dnai' ] , 'normal.bam' : bams [ 'normal_dna' ] , 'normal.bam.bai' : bams [ 'normal_dnai' ] , 'genome.fasta' : radia_options [ 'genome_fasta' ] , 'genome.fasta.fai' : radia_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) radia_output = '' . join ( [ work_dir , '/radia_' , chrom , '.vcf' ] ) radia_log = '' . join ( [ work_dir , '/radia_' , chrom , '_radia.log' ] ) parameters = [ univ_options [ 'patient' ] , chrom , '-n' , input_files [ 'normal.bam' ] , '-t' , input_files [ 'tumor.bam' ] , '-r' , input_files [ 'rna.bam' ] , '' . join ( [ '--rnaTumorFasta=' , input_files [ 'genome.fasta' ] ] ) , '-f' , input_files [ 'genome.fasta' ] , '-o' , docker_path ( radia_output ) , '-i' , 'hg19_M_rCRS' , '-m' , input_files [ 'genome.fasta' ] , '-d' , 'aarjunrao@soe.ucsc.edu' , '-q' , 'Illumina' , '--disease' , 'CANCER' , '-l' , 'INFO' , '-g' , docker_path ( radia_log ) ] docker_call ( tool = 'radia' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_files = defaultdict ( ) for radia_file in [ radia_output , radia_log ] : output_files [ os . path . basename ( radia_file ) ] = job . fileStore . writeGlobalFile ( radia_file ) filterradia = job . wrapJobFn ( run_filter_radia , bams , output_files [ os . path . basename ( radia_output ) ] , univ_options , radia_options , chrom , disk = '60G' , memory = '6G' ) job . addChild ( filterradia ) return filterradia . rv ( )
|
This module will run radia on the RNA and DNA bams
|
2,827
|
def run_filter_radia ( job , bams , radia_file , univ_options , radia_options , chrom ) : job . fileStore . logToMaster ( 'Running filter-radia on %s:%s' % ( univ_options [ 'patient' ] , chrom ) ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'rna.bam' : bams [ 'tumor_rna' ] , 'rna.bam.bai' : bams [ 'tumor_rnai' ] , 'tumor.bam' : bams [ 'tumor_dna' ] , 'tumor.bam.bai' : bams [ 'tumor_dnai' ] , 'normal.bam' : bams [ 'normal_dna' ] , 'normal.bam.bai' : bams [ 'normal_dnai' ] , 'radia.vcf' : radia_file , 'genome.fasta' : radia_options [ 'genome_fasta' ] , 'genome.fasta.fai' : radia_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) filterradia_output = '' . join ( [ 'radia_filtered_' , chrom , '.vcf' ] ) filterradia_log = '' . join ( [ work_dir , '/radia_filtered_' , chrom , '_radia.log' ] ) parameters = [ univ_options [ 'patient' ] , chrom . lstrip ( 'chr' ) , input_files [ 'radia.vcf' ] , '/data' , '/home/radia/scripts' , '-b' , '/home/radia/data/hg19/blacklists/1000Genomes/phase1/' , '-d' , '/home/radia/data/hg19/snp135' , '-r' , '/home/radia/data/hg19/retroGenes/' , '-p' , '/home/radia/data/hg19/pseudoGenes/' , '-c' , '/home/radia/data/hg19/cosmic/' , '-t' , '/home/radia/data/hg19/gaf/2_1' , '--noSnpEff' , '--rnaGeneBlckFile' , '/home/radia/data/rnaGeneBlacklist.tab' , '--rnaGeneFamilyBlckFile' , '/home/radia/data/rnaGeneFamilyBlacklist.tab' , '-f' , input_files [ 'genome.fasta' ] , '--log=INFO' , '-g' , docker_path ( filterradia_log ) ] docker_call ( tool = 'filterradia' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_files = defaultdict ( ) output_files [ filterradia_output ] = job . fileStore . writeGlobalFile ( '' . join ( [ work_dir , '/' , univ_options [ 'patient' ] , '_' , chrom , '.vcf' ] ) ) output_files [ os . path . basename ( filterradia_log ) ] = job . fileStore . writeGlobalFile ( filterradia_log ) return output_files
|
This module will run filterradia on the RNA and DNA bams .
|
2,828
|
def merge_mutect ( job , perchrom_rvs ) : job . fileStore . logToMaster ( 'Running merge_mutect' ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { filename : jsid for perchrom_files in perchrom_rvs . values ( ) for filename , jsid in perchrom_files . items ( ) } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) chromosomes = [ '' . join ( [ 'chr' , str ( x ) ] ) for x in range ( 1 , 23 ) + [ 'X' , 'Y' ] ] with open ( '/' . join ( [ work_dir , 'mutect_calls.vcf' ] ) , 'w' ) as mutvcf , open ( '/' . join ( [ work_dir , 'mutect_calls.out' ] ) , 'w' ) as mutout , open ( '/' . join ( [ work_dir , 'mutect_passing_calls.vcf' ] ) , 'w' ) as mutpassvcf : out_header_not_printed = True for chrom in chromosomes : with open ( input_files [ '' . join ( [ 'mutect_' , chrom , '.vcf' ] ) ] , 'r' ) as mutfile : for line in mutfile : line = line . strip ( ) if line . startswith ( '#' ) : if chrom == 'chr1' : print ( line , file = mutvcf ) print ( line , file = mutpassvcf ) continue else : print ( line , file = mutvcf ) line = line . split ( '\t' ) if line [ 6 ] != 'REJECT' : print ( '\t' . join ( line ) , file = mutpassvcf ) with open ( input_files [ '' . join ( [ 'mutect_' , chrom , '.out' ] ) ] , 'r' ) as mutfile : for line in mutfile : line = line . strip ( ) if line . startswith ( '#' ) : if chrom == 'chr1' : print ( line , file = mutout ) continue elif out_header_not_printed : print ( line , file = mutout ) out_header_not_printed = False else : print ( line , file = mutout ) output_file = job . fileStore . writeGlobalFile ( mutpassvcf . name ) return output_file
|
This module will merge the per-chromosome mutect files created by spawn_mutect into a genome vcf. It will make 2 vcfs: one for PASSing non-germline calls and one for all calls.
|
2,829
|
def run_mutect ( job , tumor_bam , normal_bam , univ_options , mutect_options , chrom ) : job . fileStore . logToMaster ( 'Running mutect on %s:%s' % ( univ_options [ 'patient' ] , chrom ) ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa' : mutect_options [ 'genome_fasta' ] , 'genome.fa.fai' : mutect_options [ 'genome_fai' ] , 'genome.dict' : mutect_options [ 'genome_dict' ] , 'cosmic.vcf' : mutect_options [ 'cosmic_vcf' ] , 'cosmic.vcf.idx' : mutect_options [ 'cosmic_idx' ] , 'dbsnp.vcf' : mutect_options [ 'dbsnp_vcf' ] , 'dbsnp.vcf.idx' : mutect_options [ 'dbsnp_idx' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) mutout = '' . join ( [ work_dir , '/mutect_' , chrom , '.out' ] ) mutvcf = '' . join ( [ work_dir , '/mutect_' , chrom , '.vcf' ] ) parameters = [ '-R' , input_files [ 'genome.fa' ] , '--cosmic' , input_files [ 'cosmic.vcf' ] , '--dbsnp' , input_files [ 'dbsnp.vcf' ] , '--input_file:normal' , input_files [ 'normal.bam' ] , '--input_file:tumor' , input_files [ 'tumor.bam' ] , '-L' , chrom , '--out' , docker_path ( mutout ) , '--vcf' , docker_path ( mutvcf ) ] Xmx = mutect_options [ 'java_Xmx' ] if mutect_options [ 'java_Xmx' ] else univ_options [ 'java_Xmx' ] docker_call ( tool = 'mutect:1.1.7' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_opts = Xmx ) output_files = defaultdict ( ) for mutect_file in [ mutout , mutvcf ] : output_files [ os . path . basename ( mutect_file ) ] = job . fileStore . writeGlobalFile ( mutect_file ) return output_files
|
This module will run mutect on the DNA bams
|
2,830
|
def run_indel_caller ( job , tumor_bam , normal_bam , univ_options , indel_options ) : job . fileStore . logToMaster ( 'Running INDEL on %s' % univ_options [ 'patient' ] ) indel_file = job . fileStore . getLocalTempFile ( ) output_file = job . fileStore . writeGlobalFile ( indel_file ) return output_file
|
This module will run an indel caller on the DNA bams . This module will be implemented in the future .
|
2,831
|
def run_fusion_caller ( job , star_bam , univ_options , fusion_options ) : job . fileStore . logToMaster ( 'Running FUSION on %s' % univ_options [ 'patient' ] ) fusion_file = job . fileStore . getLocalTempFile ( ) output_file = job . fileStore . writeGlobalFile ( fusion_file ) return output_file
|
This module will run a fusion caller on DNA bams . This module will be implemented in the future .
|
2,832
|
def run_mutation_aggregator ( job , fusion_output , radia_output , mutect_output , indel_output , univ_options ) : job . fileStore . logToMaster ( 'Aggregating mutations for %s' % univ_options [ 'patient' ] ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'mutect.vcf' : mutect_output , 'radia.vcf' : radia_output [ 'radia_parsed_filter_passing_calls.vcf' ] , 'indel.vcf' : indel_output , 'fusion.vcf' : fusion_output } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files . pop ( 'indel.vcf' ) input_files . pop ( 'fusion.vcf' ) vcf_file = defaultdict ( ) mutcallers = input_files . keys ( ) with open ( '' . join ( [ work_dir , '/' , univ_options [ 'patient' ] , '_merged_mutations.vcf' ] ) , 'w' ) as merged_mut_file : for mut_caller in mutcallers : caller = mut_caller . rstrip ( '.vcf' ) vcf_file [ caller ] = defaultdict ( ) with open ( input_files [ mut_caller ] , 'r' ) as mutfile : for line in mutfile : if line . startswith ( '#' ) : if caller == 'radia' : print ( line . strip ( ) , file = merged_mut_file ) continue line = line . strip ( ) . split ( ) vcf_file [ caller ] [ ( line [ 0 ] , line [ 1 ] , line [ 3 ] , line [ 4 ] ) ] = line merge_vcfs ( vcf_file , merged_mut_file . name ) export_results ( merged_mut_file . name , univ_options ) output_file = job . fileStore . writeGlobalFile ( merged_mut_file . name ) return output_file
|
This module will aggregate all the mutations called in the previous steps and will then call snpeff on the results .
|
2,833
|
def run_snpeff ( job , merged_mutation_file , univ_options , snpeff_options ) : job . fileStore . logToMaster ( 'Running snpeff on %s' % univ_options [ 'patient' ] ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'merged_mutations.vcf' : merged_mutation_file , 'snpeff_index.tar.gz' : snpeff_options [ 'index_tar' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'eff' , '-dataDir' , input_files [ 'snpeff_index' ] , '-c' , '/' . join ( [ input_files [ 'snpeff_index' ] , 'snpEff_hg19_gencode.config' ] ) , '-no-intergenic' , '-no-downstream' , '-no-upstream' , '-noStats' , 'hg19_gencode' , input_files [ 'merged_mutations.vcf' ] ] Xmx = snpeff_options [ 'java_Xmx' ] if snpeff_options [ 'java_Xmx' ] else univ_options [ 'java_Xmx' ] with open ( '/' . join ( [ work_dir , 'snpeffed_mutations.vcf' ] ) , 'w' ) as snpeff_file : docker_call ( tool = 'snpeff' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_opts = Xmx , outfile = snpeff_file ) output_file = job . fileStore . writeGlobalFile ( snpeff_file . name ) return output_file
|
This module will run snpeff on the aggregated mutation calls . Currently the only mutations called are SNPs hence SnpEff suffices . This node will be replaced in the future with another translator .
|
2,834
|
def run_transgene ( job , snpeffed_file , univ_options , transgene_options ) : job . fileStore . logToMaster ( 'Running transgene on %s' % univ_options [ 'patient' ] ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'snpeffed_muts.vcf' : snpeffed_file , 'pepts.fa' : transgene_options [ 'gencode_peptide_fasta' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ '--peptides' , input_files [ 'pepts.fa' ] , '--snpeff' , input_files [ 'snpeffed_muts.vcf' ] , '--prefix' , 'transgened' , '--pep_lens' , '9,10,15' ] docker_call ( tool = 'transgene' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_files = defaultdict ( ) for peplen in [ '9' , '10' , '15' ] : peptfile = '_' . join ( [ 'transgened_tumor' , peplen , 'mer_snpeffed.faa' ] ) mapfile = '_' . join ( [ 'transgened_tumor' , peplen , 'mer_snpeffed.faa.map' ] ) output_files [ peptfile ] = job . fileStore . writeGlobalFile ( os . path . join ( work_dir , peptfile ) ) output_files [ mapfile ] = job . fileStore . writeGlobalFile ( os . path . join ( work_dir , mapfile ) ) return output_files
|
This module will run transgene on the input vcf file from the aggregator and produce the peptides for MHC prediction
|
2,835
|
def run_phlat ( job , fastqs , sample_type , univ_options , phlat_options ) : job . fileStore . logToMaster ( 'Running phlat on %s:%s' % ( univ_options [ 'patient' ] , sample_type ) ) work_dir = job . fileStore . getLocalTempDir ( ) fq_extn = '.gz' if fastqs [ 'gzipped' ] else '' input_files = { 'input_1.fastq' + fq_extn : fastqs [ sample_type ] [ 0 ] , 'input_2.fastq' + fq_extn : fastqs [ sample_type ] [ 1 ] , 'phlat_index.tar.gz' : phlat_options [ 'index_tar' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ '-1' , input_files [ 'input_1.fastq' ] , '-2' , input_files [ 'input_2.fastq' ] , '-index' , input_files [ 'phlat_index' ] , '-b2url' , '/usr/local/bin/bowtie2' , '-tag' , sample_type , '-e' , '/home/phlat-1.0' , '-o' , '/data' , '-p' , str ( phlat_options [ 'n' ] ) ] docker_call ( tool = 'phlat' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_file = job . fileStore . writeGlobalFile ( '' . join ( [ work_dir , '/' , sample_type , '_HLA.sum' ] ) ) return output_file
|
This module will run PHLAT on SAMPLE_TYPE fastqs .
|
2,836
|
def merge_phlat_calls ( job , tumor_phlat , normal_phlat , rna_phlat ) : job . fileStore . logToMaster ( 'Merging Phlat calls' ) work_dir = job . fileStore . getLocalTempDir ( ) input_files = { 'tumor_dna' : tumor_phlat , 'normal_dna' : normal_phlat , 'tumor_rna' : rna_phlat } input_files = get_files_from_filestore ( job , input_files , work_dir ) with open ( input_files [ 'tumor_dna' ] , 'r' ) as td_file , open ( input_files [ 'normal_dna' ] , 'r' ) as nd_file , open ( input_files [ 'tumor_rna' ] , 'r' ) as tr_file : mhc_alleles = { 'HLA_A' : [ ] , 'HLA_B' : [ ] , 'HLA_C' : [ ] , 'HLA_DPA' : [ ] , 'HLA_DQA' : [ ] , 'HLA_DPB' : [ ] , 'HLA_DQB' : [ ] , 'HLA_DRB' : [ ] } for phlatfile in td_file , nd_file , tr_file : mhc_alleles = parse_phlat_file ( phlatfile , mhc_alleles ) with open ( os . path . join ( work_dir , 'mhci_alleles.list' ) , 'w' ) as mhci_file , open ( os . path . join ( work_dir , 'mhcii_alleles.list' ) , 'w' ) as mhcii_file : for mhci_group in [ 'HLA_A' , 'HLA_B' , 'HLA_C' ] : mpa = most_probable_alleles ( mhc_alleles [ mhci_group ] ) print ( '\n' . join ( [ '' . join ( [ 'HLA-' , x ] ) for x in mpa ] ) , file = mhci_file ) drb_mpa = most_probable_alleles ( mhc_alleles [ 'HLA_DRB' ] ) print ( '\n' . join ( [ '' . join ( [ 'HLA-' , x ] ) for x in drb_mpa ] ) , file = mhcii_file ) dqa_mpa = most_probable_alleles ( mhc_alleles [ 'HLA_DQA' ] ) dqb_mpa = most_probable_alleles ( mhc_alleles [ 'HLA_DQB' ] ) for dqa_allele in dqa_mpa : for dqb_allele in dqb_mpa : print ( '' . join ( [ 'HLA-' , dqa_allele , '/' , dqb_allele ] ) , file = mhcii_file ) output_files = defaultdict ( ) for allele_file in [ 'mhci_alleles.list' , 'mhcii_alleles.list' ] : output_files [ allele_file ] = job . fileStore . writeGlobalFile ( os . path . join ( work_dir , allele_file ) ) return output_files
|
This module will merge the results from running PHLAT on the 3 input fastq pairs.
|
2,837
|
def boost_ranks ( job , isoform_expression , merged_mhc_calls , transgene_out , univ_options , rank_boost_options ) : job . fileStore . logToMaster ( 'Running boost_ranks on %s' % univ_options [ 'patient' ] ) work_dir = os . path . join ( job . fileStore . getLocalTempDir ( ) , univ_options [ 'patient' ] ) os . mkdir ( work_dir ) input_files = { 'rsem_quant.tsv' : isoform_expression , 'mhci_merged_files.tsv' : merged_mhc_calls [ 'mhci_merged_files.list' ] , 'mhcii_merged_files.tsv' : merged_mhc_calls [ 'mhcii_merged_files.list' ] , 'mhci_peptides.faa' : transgene_out [ 'transgened_tumor_10_mer_snpeffed.faa' ] , 'mhcii_peptides.faa' : transgene_out [ 'transgened_tumor_15_mer_snpeffed.faa' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) output_files = { } for mhc in ( 'mhci' , 'mhcii' ) : parameters = [ mhc , input_files [ '' . join ( [ mhc , '_merged_files.tsv' ] ) ] , input_files [ 'rsem_quant.tsv' ] , input_files [ '' . join ( [ mhc , '_peptides.faa' ] ) ] , rank_boost_options [ '' . join ( [ mhc , '_combo' ] ) ] ] docker_call ( tool = 'rankboost' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] ) output_files [ mhc ] = { '' . join ( [ mhc , '_concise_results.tsv' ] ) : job . fileStore . writeGlobalFile ( '' . join ( [ work_dir , '/' , mhc , '_merged_files_concise_results.tsv' ] ) ) , '' . join ( [ mhc , '_detailed_results.tsv' ] ) : job . fileStore . writeGlobalFile ( '' . join ( [ work_dir , '/' , mhc , '_merged_files_detailed_results.tsv' ] ) ) } export_results ( work_dir , univ_options ) return output_files
|
This is the final module in the pipeline . It will call the rank boosting R script .
|
2,838
|
def get_files_from_filestore ( job , files , work_dir , cache = True , docker = False ) : for name in files . keys ( ) : outfile = job . fileStore . readGlobalFile ( files [ name ] , '/' . join ( [ work_dir , name ] ) , cache = cache ) if tarfile . is_tarfile ( outfile ) and file_xext ( outfile ) . startswith ( '.tar' ) : untar_name = os . path . basename ( strip_xext ( outfile ) ) files [ untar_name ] = untargz ( outfile , work_dir ) files . pop ( name ) name = os . path . basename ( untar_name ) elif is_gzipfile ( outfile ) and file_xext ( outfile ) == '.gz' : ungz_name = strip_xext ( outfile ) with gzip . open ( outfile , 'rb' ) as gz_in , open ( ungz_name , 'w' ) as ungz_out : shutil . copyfileobj ( gz_in , ungz_out ) files [ os . path . basename ( ungz_name ) ] = outfile files . pop ( name ) name = os . path . basename ( ungz_name ) else : files [ name ] = outfile if docker : files [ name ] = docker_path ( files [ name ] ) return files
|
This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.
|
2,839
|
def merge_vcfs ( vcf_file , merged_mut_file ) : mutect_keys = set ( vcf_file [ 'mutect' ] . keys ( ) ) radia_keys = set ( vcf_file [ 'radia' ] . keys ( ) ) common_keys = radia_keys . intersection ( mutect_keys ) with open ( merged_mut_file , 'a' ) as outfile : for mutation in common_keys : print ( '\t' . join ( vcf_file [ 'radia' ] [ mutation ] ) , file = outfile ) return None
|
This module will accept the vcf files for mutect and radia, read into memory in a dict object VCF_FILE, and will merge the calls. Merged calls are printed to MERGED_MUT_FILE.
|
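The merge above is an intersection on `(CHROM, POS, REF, ALT)` keys, with the radia record chosen as the representative line. A toy sketch with made-up calls:

```python
# Two callers' calls keyed by (chrom, pos, ref, alt); only shared keys are kept.
vcf_file = {
    'mutect': {('chr1', '100', 'A', 'T'): ['chr1', '100', '.', 'A', 'T'],
               ('chr2', '5',   'G', 'C'): ['chr2', '5', '.', 'G', 'C']},
    'radia':  {('chr1', '100', 'A', 'T'): ['chr1', '100', 'rs1', 'A', 'T']},
}
common = set(vcf_file['mutect']) & set(vcf_file['radia'])
for key in common:
    print('\t'.join(vcf_file['radia'][key]))   # the one shared call, radia's record
```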
2,840
|
def docker_call ( tool , tool_parameters , work_dir , java_opts = None , outfile = None , dockerhub = 'aarjunrao' , interactive = False ) : if outfile : assert isinstance ( outfile , file ) , 'outfile was not passsed a file' assert outfile . mode in [ 'w' , 'a' , 'wb' , 'ab' ] , 'outfile not writeable' assert not outfile . closed , 'outfile is closed' if interactive : interactive = '-i' else : interactive = '' if ':' in tool : docker_tool = '/' . join ( [ dockerhub , tool ] ) else : docker_tool = '' . join ( [ dockerhub , '/' , tool , ':latest' ] ) call = [ 'docker' , 'images' ] dimg_rv = subprocess . check_output ( call ) existing_images = [ ':' . join ( x . split ( ) [ 0 : 2 ] ) for x in dimg_rv . splitlines ( ) if x . startswith ( dockerhub ) ] if docker_tool not in existing_images : try : call = ' ' . join ( [ 'docker' , 'pull' , docker_tool ] ) . split ( ) subprocess . check_call ( call ) except subprocess . CalledProcessError as err : raise RuntimeError ( 'docker command returned a non-zero exit status ' + '(%s)' % err . returncode + 'for command \"%s\"' % ' ' . join ( call ) , ) except OSError : raise RuntimeError ( 'docker not found on system. Install on all' + ' nodes.' ) if java_opts : base_docker_call = ' docker run -e JAVA_OPTS=-Xmx{} ' . format ( java_opts ) + '--rm=true ' + '-v {}:/data --log-driver=none ' . format ( work_dir ) + interactive else : base_docker_call = ' docker run --rm=true -v {}:/data ' . format ( work_dir ) + '--log-driver=none ' + interactive call = base_docker_call . split ( ) + [ docker_tool ] + tool_parameters try : subprocess . check_call ( call , stdout = outfile ) except subprocess . CalledProcessError as err : raise RuntimeError ( 'docker command returned a non-zero exit status (%s)' % err . returncode + 'for command \"%s\"' % ' ' . join ( call ) , ) except OSError : raise RuntimeError ( 'docker not found on system. Install on all nodes.' )
|
Makes a subprocess call of a command to a docker container. work_dir MUST BE AN ABSOLUTE PATH or the call will fail. outfile is an open file descriptor to a writable file.
|
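A simplified sketch of how the `docker run` command line above is put together (it skips the image-pull check and does not actually invoke subprocess); the bind mount of `work_dir` onto `/data` is why an absolute path is required:

```python
def build_docker_call(tool, tool_parameters, work_dir,
                      dockerhub='aarjunrao', java_opts=None):
    """Assemble (but do not run) the docker invocation used above."""
    docker_tool = ('{}/{}'.format(dockerhub, tool) if ':' in tool
                   else '{}/{}:latest'.format(dockerhub, tool))
    base = ['docker', 'run', '--rm=true', '-v', '{}:/data'.format(work_dir),
            '--log-driver=none']
    if java_opts:
        # Insert the JAVA_OPTS environment flag right after 'docker run'.
        base[2:2] = ['-e', 'JAVA_OPTS=-Xmx{}'.format(java_opts)]
    return base + [docker_tool] + tool_parameters

print(build_docker_call('samtools', ['view', '-H', '/data/aligned.bam'],
                        '/abs/path/to/work_dir'))
```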
2,841
|
def untargz ( input_targz_file , untar_to_dir ) : assert tarfile . is_tarfile ( input_targz_file ) , 'Not a tar file.' tarball = tarfile . open ( input_targz_file ) return_value = os . path . join ( untar_to_dir , tarball . getmembers ( ) [ 0 ] . name ) tarball . extractall ( path = untar_to_dir ) tarball . close ( ) return return_value
|
This module accepts a tar.gz archive and untars it.
|
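A self-contained round trip of the `untargz` idea using only the standard library: build a tiny `.tar.gz` in a temp directory, then extract it and recover the path of its first member, as the helper above does:

```python
import os
import tarfile
import tempfile

tmp = tempfile.mkdtemp()
payload = os.path.join(tmp, 'payload.txt')
with open(payload, 'w') as fh:
    fh.write('hello')

# Create the archive...
archive = os.path.join(tmp, 'payload.tar.gz')
with tarfile.open(archive, 'w:gz') as tar:
    tar.add(payload, arcname='payload.txt')

# ...then extract it and return the path of the first member.
with tarfile.open(archive) as tarball:
    first_member = os.path.join(tmp, tarball.getmembers()[0].name)
    tarball.extractall(path=tmp)

print(first_member)   # .../payload.txt
```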
2,842
|
def bam2fastq ( job , bamfile , univ_options ) : work_dir = os . path . split ( bamfile ) [ 0 ] base_name = os . path . split ( os . path . splitext ( bamfile ) [ 0 ] ) [ 1 ] parameters = [ 'SamToFastq' , '' . join ( [ 'I=' , docker_path ( bamfile ) ] ) , '' . join ( [ 'F=/data/' , base_name , '_1.fastq' ] ) , '' . join ( [ 'F2=/data/' , base_name , '_2.fastq' ] ) , '' . join ( [ 'FU=/data/' , base_name , '_UP.fastq' ] ) ] docker_call ( tool = 'picard' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_opts = univ_options [ 'java_Xmx' ] ) first_fastq = '' . join ( [ work_dir , '/' , base_name , '_1.fastq' ] ) assert os . path . exists ( first_fastq ) return first_fastq
|
Split an input bam into paired fastqs.
|
2,843
|
def main ( ) : parser = argparse . ArgumentParser ( ) parser . add_argument ( '--config_file' , dest = 'config_file' , help = 'Config file to be used in the' + 'run.' , type = str , required = True , default = None ) Job . Runner . addToilOptions ( parser ) params = parser . parse_args ( ) START = Job . wrapJobFn ( parse_config_file , params . config_file ) . encapsulate ( ) Job . Runner . startToil ( START , params ) return None
|
This is the main function for the UCSC Precision Immuno pipeline .
|
2,844
|
def run_strelka_with_merge ( job , tumor_bam , normal_bam , univ_options , strelka_options ) : spawn = job . wrapJobFn ( run_strelka , tumor_bam , normal_bam , univ_options , strelka_options , split = False ) . encapsulate ( ) job . addChild ( spawn ) return spawn . rv ( )
|
A wrapper for the entire strelka sub-graph.
|
2,845
|
def run_strelka ( job , tumor_bam , normal_bam , univ_options , strelka_options , split = True ) : if strelka_options [ 'chromosomes' ] : chromosomes = strelka_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , strelka_options [ 'genome_fai' ] ) num_cores = min ( len ( chromosomes ) , univ_options [ 'max_cores' ] ) strelka = job . wrapJobFn ( run_strelka_full , tumor_bam , normal_bam , univ_options , strelka_options , disk = PromisedRequirement ( strelka_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , strelka_options [ 'genome_fasta' ] ) , memory = '6G' , cores = num_cores ) job . addChild ( strelka ) if split : unmerge_strelka = job . wrapJobFn ( wrap_unmerge , strelka . rv ( ) , chromosomes , strelka_options , univ_options ) . encapsulate ( ) strelka . addChild ( unmerge_strelka ) return unmerge_strelka . rv ( ) else : return strelka . rv ( )
|
Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome vcfs.
|
2,846
|
def run_strelka_full ( job , tumor_bam , normal_bam , univ_options , strelka_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : strelka_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : strelka_options [ 'genome_fai' ] , 'config.ini.tar.gz' : strelka_options [ 'config_file' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' , 'config.ini' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ input_files [ 'config.ini' ] , input_files [ 'tumor.bam' ] , input_files [ 'normal.bam' ] , input_files [ 'genome.fa' ] , str ( job . cores ) ] docker_call ( tool = 'strelka' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = strelka_options [ 'version' ] ) output_dict = { } for mutation_type in [ 'snvs' , 'indels' ] : output_dict [ mutation_type ] = job . fileStore . writeGlobalFile ( os . path . join ( work_dir , 'strelka_out' , 'results' , 'passed.somatic.' + mutation_type + '.vcf' ) ) job . fileStore . logToMaster ( 'Ran strelka on %s successfully' % univ_options [ 'patient' ] ) return output_dict
|
Run strelka on the DNA bams .
|
2,847
|
def wrap_unmerge ( job , strelka_out , chromosomes , strelka_options , univ_options ) : return { 'snvs' : job . addChildJobFn ( unmerge , strelka_out [ 'snvs' ] , 'strelka/snv' , chromosomes , strelka_options , univ_options ) . rv ( ) , 'indels' : job . addChildJobFn ( unmerge , strelka_out [ 'indels' ] , 'strelka/indel' , chromosomes , strelka_options , univ_options ) . rv ( ) }
|
A wrapper to unmerge the strelka snvs and indels.
|
2,848
|
def get_iso_time_str ( timestamp : Union [ int , float , str , datetime ] = None ) -> str : if isinstance ( timestamp , ( int , float ) ) : maya_dt = maya . MayaDT ( timestamp ) elif isinstance ( timestamp , str ) : maya_dt = maya . when ( timestamp ) elif timestamp is None : maya_dt = maya . now ( ) else : raise ValueError ( f'`{type(timestamp)}` is not supported' ) return maya_dt . iso8601 ( )
|
Get the ISO time string from a timestamp or date obj. Returns the current time str if no timestamp is passed.
|
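The function above dispatches on the type of `timestamp` and relies on the maya library for parsing. A standard-library sketch of the same dispatch-on-type idea (not a drop-in replacement: `datetime.fromisoformat` only accepts ISO strings, while `maya.when` parses looser date text; a `datetime` branch is also added here):

```python
from datetime import datetime, timezone
from typing import Union

def iso_time_str(timestamp: Union[int, float, str, datetime] = None) -> str:
    if isinstance(timestamp, (int, float)):
        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    elif isinstance(timestamp, str):
        dt = datetime.fromisoformat(timestamp)      # ISO strings only
    elif isinstance(timestamp, datetime):
        dt = timestamp
    elif timestamp is None:
        dt = datetime.now(timezone.utc)
    else:
        raise ValueError(f'`{type(timestamp)}` is not supported')
    return dt.isoformat()

print(iso_time_str(0))                # 1970-01-01T00:00:00+00:00
print(iso_time_str('2021-06-01'))     # 2021-06-01T00:00:00
```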
2,849
|
def truncate ( value : Decimal , n_digits : int ) -> Decimal : return Decimal ( math . trunc ( value * ( 10 ** n_digits ) ) ) / ( 10 ** n_digits )
|
Truncates a value to a number of decimal places.
|
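A worked example of the truncation above, which drops digits instead of rounding (and truncates toward zero for negative values):

```python
import math
from decimal import Decimal

def truncate(value: Decimal, n_digits: int) -> Decimal:
    # Shift left, drop the fraction, shift back.
    return Decimal(math.trunc(value * (10 ** n_digits))) / (10 ** n_digits)

print(truncate(Decimal('1.23789'), 2))    # 1.23  (no rounding up)
print(truncate(Decimal('-1.23789'), 2))   # -1.23 (trunc moves toward zero)
```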
2,850
|
def truncate_to ( value : Decimal , currency : str ) -> Decimal : decimal_places = DECIMALS . get ( currency . upper ( ) , 2 ) return truncate ( value , decimal_places )
|
Truncates a value to the number of decimals corresponding to the currency
|
2,851
|
def truncate_money ( money : Money ) -> Money : amount = truncate_to ( money . amount , money . currency ) return Money ( amount , money . currency )
|
Truncates money amount to the number of decimals corresponding to the currency
|
2,852
|
def spread_value ( value : Decimal , spread_p : Decimal ) -> Tuple [ Decimal , Decimal ] : upper = value * ( 1 + spread_p ) lower = value / ( 1 + spread_p ) return lower , upper
|
Returns a lower and upper value separated by a spread percentage
|
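A worked example of the spread above; note that the function returns `(lower, upper)` in that order, which matters when unpacking the pair (the `spread_money` wrapper in the next entry unpacks it as `upper, lower`):

```python
from decimal import Decimal

def spread_value(value, spread_p):
    upper = value * (1 + spread_p)
    lower = value / (1 + spread_p)
    return lower, upper

# A 1% spread around 100 is asymmetric: division on the way down, multiplication up.
lower, upper = spread_value(Decimal('100'), Decimal('0.01'))
print(lower)   # 99.0099...
print(upper)   # 101.00
```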
2,853
|
def spread_money ( money : Money , spread_p : Decimal ) -> Tuple [ Money , Money ] : upper , lower = spread_value ( money . amount , spread_p ) return Money ( upper , money . currency ) , Money ( lower , money . currency )
|
Returns a lower and upper money amount separated by a spread percentage
|
2,854
|
def check_valid_ad_range ( date ) : if date < values . START_EN_DATE or date > values . END_EN_DATE : raise ValueError ( "Date out of range" ) return True
|
Checks if the English date is in a valid range for conversion.
|
2,855
|
def check_valid_bs_range ( date ) : ERR_MSG = "%s out of range" % str ( date ) if date . year < values . START_NP_YEAR or date . year > values . END_NP_YEAR : raise ValueError ( ERR_MSG ) if date . month < 1 or date . month > 12 : raise ValueError ( ERR_MSG ) if date . day < 1 or date . day > values . NEPALI_MONTH_DAY_DATA [ date . year ] [ date . month - 1 ] : raise ValueError ( ERR_MSG ) return True
|
Checks if the Nepali date is in a valid range for conversion.
|
2,856
|
def nepali_number ( number ) : nepnum = "" for n in str ( number ) : nepnum += values . NEPDIGITS [ int ( n ) ] return nepnum
|
Convert a number to Nepali digits.
|
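A self-contained sketch of the digit mapping above; the `NEPDIGITS` list here is an assumed stand-in for `values.NEPDIGITS` (the Devanagari digits 0-9):

```python
# Assumed stand-in for values.NEPDIGITS: Devanagari digits 0-9.
NEPDIGITS = ['०', '१', '२', '३', '४', '५', '६', '७', '८', '९']

def nepali_number(number):
    # Map each decimal digit of the number to its Devanagari counterpart.
    return ''.join(NEPDIGITS[int(ch)] for ch in str(number))

print(nepali_number(2075))   # २०७५
```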
2,857
|
def get_fields ( self ) : fields = super ( GeoModelSerializer , self ) . get_fields ( ) if not self . Meta . geom_field : for name , field in fields . items ( ) : if isinstance ( field , GeometryField ) : self . Meta . geom_field = name break return fields
|
Returns a fields dict for this serializer with a geometry field added .
|
2,858
|
def run_muse_with_merge ( job , tumor_bam , normal_bam , univ_options , muse_options ) : spawn = job . wrapJobFn ( run_muse , tumor_bam , normal_bam , univ_options , muse_options , disk = '100M' ) . encapsulate ( ) merge = job . wrapJobFn ( merge_perchrom_vcfs , spawn . rv ( ) , disk = '100M' ) job . addChild ( spawn ) spawn . addChild ( merge ) return merge . rv ( )
|
A wrapper for the entire MuSE sub-graph.
|
2,859
|
def run_muse ( job , tumor_bam , normal_bam , univ_options , muse_options ) : if muse_options [ 'chromosomes' ] : chromosomes = muse_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , muse_options [ 'genome_fai' ] ) perchrom_muse = defaultdict ( ) for chrom in chromosomes : call = job . addChildJobFn ( run_muse_perchrom , tumor_bam , normal_bam , univ_options , muse_options , chrom , disk = PromisedRequirement ( muse_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , muse_options [ 'genome_fasta' ] ) , memory = '6G' ) sump = call . addChildJobFn ( run_muse_sump_perchrom , call . rv ( ) , univ_options , muse_options , chrom , disk = PromisedRequirement ( muse_sump_disk , muse_options [ 'dbsnp_vcf' ] ) , memory = '6G' ) perchrom_muse [ chrom ] = sump . rv ( ) return perchrom_muse
|
Spawn a MuSE job for each chromosome on the DNA bams .
|
2,860
|
def run_muse_perchrom ( job , tumor_bam , normal_bam , univ_options , muse_options , chrom ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : muse_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : muse_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } output_prefix = os . path . join ( work_dir , chrom ) parameters = [ 'call' , '-f' , input_files [ 'genome.fa' ] , '-r' , chrom , '-O' , docker_path ( output_prefix ) , input_files [ 'tumor.bam' ] , input_files [ 'normal.bam' ] ] docker_call ( tool = 'muse' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = muse_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( '' . join ( [ output_prefix , '.MuSE.txt' ] ) ) job . fileStore . logToMaster ( 'Ran MuSE on %s:%s successfully' % ( univ_options [ 'patient' ] , chrom ) ) return outfile
|
Run MuSE call on a single chromosome in the input bams .
|
2,861
|
def run_muse_sump_perchrom ( job , muse_output , univ_options , muse_options , chrom ) : work_dir = os . getcwd ( ) input_files = { 'MuSE.txt' : muse_output , 'dbsnp_coding.vcf.gz' : muse_options [ 'dbsnp_vcf' ] , 'dbsnp_coding.vcf.gz.tbi.tmp' : muse_options [ 'dbsnp_tbi' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) tbi = os . path . splitext ( input_files [ 'dbsnp_coding.vcf.gz.tbi.tmp' ] ) [ 0 ] time . sleep ( 2 ) shutil . copy ( input_files [ 'dbsnp_coding.vcf.gz.tbi.tmp' ] , tbi ) os . chmod ( tbi , 0777 ) open ( tbi , 'a' ) . close ( ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } output_file = '' . join ( [ work_dir , '/' , chrom , '.vcf' ] ) parameters = [ 'sump' , '-I' , input_files [ 'MuSE.txt' ] , '-O' , docker_path ( output_file ) , '-D' , input_files [ 'dbsnp_coding.vcf.gz' ] , '-E' ] docker_call ( tool = 'muse' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = muse_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( output_file ) export_results ( job , outfile , output_file , univ_options , subfolder = 'mutations/muse' ) job . fileStore . logToMaster ( 'Ran MuSE sump on %s:%s successfully' % ( univ_options [ 'patient' ] , chrom ) ) return outfile
|
Run MuSE sump on the MuSE call generated vcf .
|
2,862
|
def linear ( self , limits = None , k = 5 ) : start , stop = limits or ( self . minval , self . maxval ) return np . linspace ( start , stop , k )
|
Returns an ndarray of linear breaks .
|
2,863
|
def quantiles ( self , k = 5 ) : arr = self . array ( ) q = list ( np . linspace ( 0 , 100 , k ) ) return np . percentile ( arr . compressed ( ) , q )
|
Returns an ndarray of quantile breaks .
|
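The two entries above compute class breaks differently: `linear` spaces them evenly between the extremes, while `quantiles` follows the data distribution (`.compressed()` drops masked cells first). A toy comparison with numpy, assuming it is installed:

```python
import numpy as np

# One masked nodata cell (-99) and a skewed tail value (10).
arr = np.ma.masked_values([0, 1, 1, 1, 2, 2, 3, 10, -99], -99)

linear_breaks = np.linspace(arr.min(), arr.max(), 5)
quantile_breaks = np.percentile(arr.compressed(), list(np.linspace(0, 100, 5)))

print(linear_breaks)     # [ 0.   2.5  5.   7.5 10. ]
print(quantile_breaks)   # [ 0.    1.    1.5   2.25 10.  ]
```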
2,864
|
def to_python ( self , value ) : if not value : return [ ] return map ( super ( CommaSepFloatField , self ) . to_python , value . split ( ',' ) )
|
Normalize data to a list of floats .
|
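A sketch of the comma-separated parsing above, written with a list comprehension; note that on Python 3 the original's `map(...)` returns a lazy iterator rather than a list:

```python
def to_float_list(value):
    """Normalize a comma-separated string to a list of floats."""
    if not value:
        return []
    return [float(part) for part in value.split(',')]

print(to_float_list('1.5,2,3.25'))   # [1.5, 2.0, 3.25]
print(to_float_list(''))             # []
```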
2,865
|
def run_validators ( self , values ) : for val in values : super ( CommaSepFloatField , self ) . run_validators ( val )
|
Run validators for each item separately .
|
2,866
|
def to_python ( self , value ) : value = super ( BoundingBoxField , self ) . to_python ( value ) try : bbox = gdal . OGRGeometry . from_bbox ( value ) . geos except ( ValueError , AttributeError ) : return [ ] bbox . srid = self . srid return bbox
|
Returns a GEOS Polygon from bounding box values .
|
2,867
|
def run_mutect_with_merge ( job , tumor_bam , normal_bam , univ_options , mutect_options ) : spawn = job . wrapJobFn ( run_mutect , tumor_bam , normal_bam , univ_options , mutect_options ) . encapsulate ( ) merge = job . wrapJobFn ( merge_perchrom_vcfs , spawn . rv ( ) ) job . addChild ( spawn ) spawn . addChild ( merge ) return merge . rv ( )
|
A wrapper for the entire MuTect sub-graph.
|
2,868
|
def run_mutect ( job , tumor_bam , normal_bam , univ_options , mutect_options ) : if mutect_options [ 'chromosomes' ] : chromosomes = mutect_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , mutect_options [ 'genome_fai' ] ) perchrom_mutect = defaultdict ( ) for chrom in chromosomes : perchrom_mutect [ chrom ] = job . addChildJobFn ( run_mutect_perchrom , tumor_bam , normal_bam , univ_options , mutect_options , chrom , memory = '6G' , disk = PromisedRequirement ( mutect_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , mutect_options [ 'genome_fasta' ] , mutect_options [ 'dbsnp_vcf' ] , mutect_options [ 'cosmic_vcf' ] ) ) . rv ( ) return perchrom_mutect
|
Spawn a MuTect job for each chromosome on the DNA bams .
|
2,869
|
def run_mutect_perchrom ( job , tumor_bam , normal_bam , univ_options , mutect_options , chrom ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : mutect_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : mutect_options [ 'genome_fai' ] , 'genome.dict.tar.gz' : mutect_options [ 'genome_dict' ] , 'cosmic.vcf.tar.gz' : mutect_options [ 'cosmic_vcf' ] , 'cosmic.vcf.idx.tar.gz' : mutect_options [ 'cosmic_idx' ] , 'dbsnp.vcf.gz' : mutect_options [ 'dbsnp_vcf' ] , 'dbsnp.vcf.idx.tar.gz' : mutect_options [ 'dbsnp_idx' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files [ 'dbsnp.vcf' ] = gunzip ( input_files [ 'dbsnp.vcf.gz' ] ) for key in ( 'genome.fa' , 'genome.fa.fai' , 'genome.dict' , 'cosmic.vcf' , 'cosmic.vcf.idx' , 'dbsnp.vcf.idx' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } mutout = '' . join ( [ work_dir , '/' , chrom , '.out' ] ) mutvcf = '' . join ( [ work_dir , '/' , chrom , '.vcf' ] ) parameters = [ '-R' , input_files [ 'genome.fa' ] , '--cosmic' , input_files [ 'cosmic.vcf' ] , '--dbsnp' , input_files [ 'dbsnp.vcf' ] , '--input_file:normal' , input_files [ 'normal.bam' ] , '--input_file:tumor' , input_files [ 'tumor.bam' ] , '-L' , chrom , '--out' , docker_path ( mutout ) , '--vcf' , docker_path ( mutvcf ) ] java_xmx = mutect_options [ 'java_Xmx' ] if mutect_options [ 'java_Xmx' ] else univ_options [ 'java_Xmx' ] docker_call ( tool = 'mutect' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = java_xmx , tool_version = mutect_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( mutvcf ) export_results ( job , output_file , mutvcf , univ_options , subfolder = 'mutations/mutect' ) job . fileStore . logToMaster ( 'Ran MuTect on %s:%s successfully' % ( univ_options [ 'patient' ] , chrom ) ) return output_file
|
Run MuTect call on a single chromosome in the input bams .
|
2,870
|
def process_mutect_vcf ( job , mutect_vcf , work_dir , univ_options ) : mutect_vcf = job . fileStore . readGlobalFile ( mutect_vcf ) with open ( mutect_vcf , 'r' ) as infile , open ( mutect_vcf + 'mutect_parsed.tmp' , 'w' ) as outfile : for line in infile : line = line . strip ( ) if line . startswith ( '#' ) : print ( line , file = outfile ) continue line = line . split ( '\t' ) if line [ 6 ] != 'REJECT' : print ( '\t' . join ( line ) , file = outfile ) return outfile . name
|
Process the MuTect vcf for accepted calls .
|
2,871
|
def get_universe ( self , as_str = False ) : result = self . _iface . get_connected_universe ( ) if as_str : return Universe . get_alias ( result ) return result
|
Returns the universe the client is connected to. See Universe.
|
2,872
|
def EXTRA_LOGGING(self):
    input_text = get('EXTRA_LOGGING', '')
    if input_text:
        modules = input_text.split(',')
        modules = [x.split(':') for x in modules]
    else:
        modules = []
    return modules
|
Returns the list of modules to log along with their debug levels; a standalone parsing sketch follows this row.
|
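The parsing rule above is easy to miss in its condensed form, so here is a standalone sketch assuming the same comma-separated 'module:LEVEL' convention; the sample input is invented.

```python
def parse_extra_logging(input_text):
    # '' -> []; 'a:DEBUG,b:INFO' -> [['a', 'DEBUG'], ['b', 'INFO']]
    if not input_text:
        return []
    return [item.split(':') for item in input_text.split(',')]


print(parse_extra_logging('django.db:DEBUG,requests:WARNING'))
# [['django.db', 'DEBUG'], ['requests', 'WARNING']]
```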
2,873
|
def from_ad_date(cls, date):
    functions.check_valid_ad_range(date)
    start_date = NepDate(values.START_NP_YEAR, 1, 1)
    return start_date + (date - values.START_EN_DATE)
|
Gets a NepDate object from a Gregorian (AD) calendar date; a sketch of the offset arithmetic follows this row.
|
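The conversion above is plain day-offset arithmetic against a known AD/BS epoch pair. The sketch below shows the idea with datetime alone; the epoch date used here is an assumption for illustration only, the real value lives in the library's values module.

```python
import datetime

# Assumed AD epoch paired with NepDate(START_NP_YEAR, 1, 1); for illustration only.
START_EN_DATE = datetime.date(1944, 1, 1)

ad_date = datetime.date(1944, 3, 1)
offset = ad_date - START_EN_DATE   # timedelta of whole days since the epoch
print(offset.days)                 # 60; this offset is added to the BS epoch date
```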
2,874
|
def from_bs_date(cls, year, month, day):
    return NepDate(year, month, day).update()
|
Create and update a NepDate object for a Bikram Sambat date.
|
2,875
|
def events_list(self):
    evt = []
    evt.extend(events.NEPALI_EVENTS[self.month, self.day])
    evt.extend(events.ENGLISH_EVENTS[self.en_date.month, self.en_date.day])
    return evt
|
Returns the events for today.
|
2,876
|
def update(self):
    functions.check_valid_bs_range(self)
    self.en_date = values.START_EN_DATE + (self - NepDate(values.START_NP_YEAR, 1, 1))
    return self
|
Updates information about the NepDate.
|
2,877
|
def get_file_from_s3(job, s3_url, encryption_key=None, write_to_jobstore=True):
    work_dir = job.fileStore.getLocalTempDir()
    filename = '/'.join([work_dir, os.path.basename(s3_url)])
    download_call = ['curl', '-fs', '--retry', '5']
    if encryption_key:
        key = generate_unique_key(encryption_key, s3_url)
        encoded_key = base64.b64encode(key)
        encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
        h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
        h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
        h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
        download_call.extend(['-H', h1, '-H', h2, '-H', h3])
    download_call.extend([s3_url, '-o', filename])
    try:
        subprocess.check_call(download_call)
    except subprocess.CalledProcessError:
        raise RuntimeError('Curl returned a non-zero exit status processing %s. Do you '
                           'have permissions to access the file?' % s3_url)
    except OSError:
        raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
    assert os.path.exists(filename)
    if write_to_jobstore:
        filename = job.fileStore.writeGlobalFile(filename)
    return filename
|
Downloads a file from Amazon S3 given a supplied URL; if an encryption key is provided, SSE-C headers are added to the request. The file is downloaded and subsequently written to the jobstore, and the return value is the path to the file in the jobstore. A sketch of the header construction follows this row.
|
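The SSE-C headers assembled above follow a fixed recipe: base64 of the raw 32-byte key and base64 of that key's MD5 digest. The snippet below reproduces just that header construction with a dummy key; it does not talk to S3.

```python
import base64
import hashlib

key = b'0' * 32  # SSE-C expects a 256-bit customer key; this one is a dummy
encoded_key = base64.b64encode(key).decode()
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()).decode()

curl_args = ['curl', '-fs', '--retry', '5']
for header in ('x-amz-server-side-encryption-customer-algorithm:AES256',
               'x-amz-server-side-encryption-customer-key:%s' % encoded_key,
               'x-amz-server-side-encryption-customer-key-md5:%s' % encoded_key_md5):
    curl_args.extend(['-H', header])
print(curl_args)
```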
2,878
|
def filter_geometry(queryset, **filters):
    fieldname = geo_field(queryset).name
    query = {'%s__%s' % (fieldname, k): v for k, v in filters.items()}
    return queryset.filter(**query)
|
Helper function for spatial lookup filters; a minimal illustration follows this row.
|
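The dynamic lookup built above is ordinary Django filter-kwarg construction; the snippet below shows the dictionary it produces, using a placeholder field name and WKT geometry rather than a real queryset.

```python
fieldname = 'geom'                      # placeholder geometry field name
filters = {'intersects': 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))'}
query = {'%s__%s' % (fieldname, k): v for k, v in filters.items()}
print(query)   # {'geom__intersects': 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))'}
# which is then applied as queryset.filter(**query)
```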
2,879
|
def geo_field(queryset):
    for field in queryset.model._meta.fields:
        if isinstance(field, models.GeometryField):
            return field
    raise exceptions.FieldDoesNotExist('No GeometryField found')
|
Returns the GeometryField for a Django or spillway GeoQuerySet.
|
2,880
|
def get_srid(queryset):
    try:
        srid = list(six.viewvalues(queryset.query.annotations))[0].srid
    except (AttributeError, IndexError):
        srid = None
    return srid or geo_field(queryset).srid
|
Returns the GeoQuerySet spatial reference identifier.
|
2,881
|
def agg_dims(arr, stat):
    axis = None
    if arr.ndim > 2:
        axis = 1
        arr = arr.reshape(arr.shape[0], -1)
    module = np.ma if hasattr(arr, 'mask') else np
    return getattr(module, stat)(arr, axis)
|
Returns a 1D array with higher dimensions aggregated using the named stat function; a runnable demonstration follows this row.
|
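To make the reshaping rule concrete, the demonstration below aggregates a (bands, rows, cols) stack band-by-band and a plain 2-D array to a scalar, which are exactly the two paths the function distinguishes.

```python
import numpy as np

arr = np.arange(24, dtype=float).reshape(2, 3, 4)   # two 3x4 "bands"
flat = arr.reshape(arr.shape[0], -1)                # shape (2, 12)
print(np.mean(flat, 1))                             # [ 5.5 17.5] -- one value per band

# A 2-D array takes the axis=None path and collapses to a scalar.
print(np.mean(np.arange(12, dtype=float).reshape(3, 4), None))   # 5.5
```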
2,882
|
def extent(self, srid=None):
    expr = self.geo_field.name
    if srid:
        expr = geofn.Transform(expr, srid)
    expr = models.Extent(expr)
    clone = self.all()
    name, val = clone.aggregate(expr).popitem()
    return val
|
Returns the GeoQuerySet extent as a 4-tuple.
|
2,883
|
def pbf(self, bbox, geo_col=None, scale=4096):
    col = geo_col or self.geo_field.name
    w, s, e, n = bbox.extent
    trans = self._trans_scale(col, -w, -s, scale / (e - w), scale / (n - s))
    g = AsText(trans)
    return self.annotate(pbf=g)
|
Returns translated and scaled geometries suitable for Mapbox vector tiles; a plain-Python sketch of the scaling follows this row.
|
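The translate-and-scale step is the heart of packing geometries into a vector tile: shift the bbox origin to (0, 0), then stretch it to the tile's integer extent (4096 by default). A plain-Python sketch with arbitrary bbox numbers:

```python
def to_tile_coords(x, y, bbox, scale=4096):
    # bbox is (west, south, east, north); output is in tile-local units.
    w, s, e, n = bbox
    return ((x - w) * scale / (e - w),
            (y - s) * scale / (n - s))


bbox = (-180.0, -90.0, 180.0, 90.0)
print(to_tile_coords(-180.0, -90.0, bbox))  # (0.0, 0.0)
print(to_tile_coords(0.0, 0.0, bbox))       # (2048.0, 2048.0)
print(to_tile_coords(180.0, 90.0, bbox))    # (4096.0, 4096.0)
```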
2,884
|
def tile(self, bbox, z=0, format=None, clip=True):
    tile_srid = 3857
    bbox = getattr(bbox, 'geos', bbox)
    clone = filter_geometry(self, intersects=bbox)
    field = clone.geo_field
    srid = field.srid
    sql = field.name
    try:
        tilew = self.tilewidths[z]
    except IndexError:
        tilew = self.tilewidths[-1]
    if bbox.srid != srid:
        bbox = bbox.transform(srid, clone=True)
    if bbox.srs.geographic:
        p = geos.Point(tilew, tilew, srid=tile_srid)
        p.transform(srid)
        tilew = p.x
    if clip:
        bufbox = bbox.buffer(tilew)
        sql = geofn.Intersection(sql, bufbox.envelope)
    sql = SimplifyPreserveTopology(sql, tilew)
    if format == 'pbf':
        return clone.pbf(bbox, geo_col=sql)
    sql = geofn.Transform(sql, 4326)
    return clone.annotate(**{format: sql})
|
Returns a GeoQuerySet intersecting a tile boundary.
|
2,885
|
def arrays(self, field_name=None):
    fieldname = field_name or self.raster_field.name
    arrays = []
    for obj in self:
        arr = getattr(obj, fieldname)
        if isinstance(arr, np.ndarray):
            arrays.append(arr)
        else:
            arrays.append(obj.array())
    return arrays
|
Returns a list of ndarrays.
|
2,886
|
def aggregate_periods(self, periods):
    try:
        fieldname = self.raster_field.name
    except TypeError:
        raise exceptions.FieldDoesNotExist('Raster field not found')
    arrays = self.arrays(fieldname)
    arr = arrays[0]
    if len(arrays) > 1:
        if getattr(arr, 'ndim', 0) > 2:
            arrays = np.vstack(arrays)
        fill = getattr(arr, 'fill_value', None)
        arr = np.ma.masked_values(arrays, fill, copy=False)
    try:
        means = arr.reshape((periods, -1)).mean(axis=1)
    except ValueError:
        means = np.array([a.mean() for a in np.array_split(arr, periods)])
    obj = self[0]
    setattr(obj, fieldname, means)
    return [obj]
|
Returns a list of ndarrays averaged to a given number of periods; a runnable sketch of the averaging follows this row.
|
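The period averaging relies on a reshape trick plus an np.array_split fallback for lengths that do not divide evenly; both paths are shown below on toy data.

```python
import numpy as np

arr = np.arange(12, dtype=float)          # e.g. 12 monthly values
print(arr.reshape((4, -1)).mean(axis=1))  # quarterly means: [ 1.  4.  7. 10.]

ragged = np.arange(10, dtype=float)       # 10 values split into 4 periods
print(np.array([a.mean() for a in np.array_split(ragged, 4)]))
# [1.  4.  6.5 8.5]
```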
2,887
|
def raster_field(self):
    for field in self.model._meta.fields:
        if isinstance(field, models.FileField):
            return field
    return False
|
Returns the raster FileField instance on the model.
|
2,888
|
def zipfiles(self, path=None, arcdirname='data'):
    if path:
        fp = open(path, 'w+b')
    else:
        prefix = '%s-' % arcdirname
        fp = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.zip')
    with zipfile.ZipFile(fp, mode='w') as zf:
        for obj in self:
            img = obj.image
            arcname = os.path.join(arcdirname, os.path.basename(img.name))
            try:
                zf.write(img.path, arcname=arcname)
            except OSError:
                img.seek(0)
                zf.writestr(arcname, img.read())
            img.close()
    fp.seek(0)
    zobj = self.model(image=fp)
    return [zobj]
|
Returns a .zip archive of selected rasters; a self-contained archiving sketch follows this row.
|
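A self-contained sketch of the same archiving pattern, using dummy member contents instead of raster files: write entries into a named temporary .zip, then rewind it so a caller can re-read it or hand it to a model field.

```python
import os
import tempfile
import zipfile

fp = tempfile.NamedTemporaryFile(prefix='data-', suffix='.zip')
with zipfile.ZipFile(fp, mode='w') as zf:
    for name in ('a.tif', 'b.tif'):
        arcname = os.path.join('data', name)
        zf.writestr(arcname, 'dummy raster bytes for %s' % name)
fp.seek(0)   # rewind so the archive can be read back or uploaded
with zipfile.ZipFile(fp) as zf:
    print(zf.namelist())  # ['data/a.tif', 'data/b.tif']
```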
2,889
|
def init(self, app_id=None):
    self.set_app_id(app_id)
    err_msg = ('Unable to initialize. Check Steam client is running '
               'and Steam application ID is defined in steam_appid.txt or passed to Api.')
    if self._lib.steam_init():
        try:
            _set_client(self._lib.Client())
            self.utils = Utils()
            self.current_user = CurrentUser()
            self.friends = Friends()
            self.groups = Groups()
            self.apps = Applications()
            self.overlay = Overlay()
            self.screenshots = Screenshots()
        except Exception as e:
            raise SteamApiStartupError('%s:\n%s' % (err_msg, e))
    else:
        raise SteamApiStartupError(err_msg)
|
Initializes the Steam API library.
|
2,890
|
def get_files_from_filestore(job, files, work_dir, docker=False):
    for name in files.keys():
        outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]))
        if docker:
            files[name] = docker_path(outfile)
        else:
            files[name] = outfile
    return files
|
Download a dict of files to the given directory, modifying the paths to docker-friendly ones if requested.
|
2,891
|
def gunzip(input_gzip_file, block_size=1024):
    assert os.path.splitext(input_gzip_file)[1] == '.gz'
    assert is_gzipfile(input_gzip_file)
    with gzip.open(input_gzip_file) as infile:
        with open(os.path.splitext(input_gzip_file)[0], 'w') as outfile:
            while True:
                block = infile.read(block_size)
                if block == '':
                    break
                else:
                    outfile.write(block)
    return outfile.name
|
Gunzips the input file into the same directory; a hedged Python 3 variant follows this row.
|
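A hedged Python 3 variant of the same block-wise decompression, folding in the magic-byte test used by is_gzipfile in the next row (gzip streams start with bytes 1f 8b 08). The paths are illustrative.

```python
import gzip
import os


def looks_gzipped(path):
    # Same check as is_gzipfile, but with bytes literals for Python 3.
    with open(path, 'rb') as handle:
        return handle.read(3) == b'\x1f\x8b\x08'


def gunzip_to_sibling(path, block_size=1024):
    assert path.endswith('.gz') and looks_gzipped(path)
    out_path = os.path.splitext(path)[0]
    with gzip.open(path, 'rb') as infile, open(out_path, 'wb') as outfile:
        while True:
            block = infile.read(block_size)
            if not block:          # b'' signals end of stream
                break
            outfile.write(block)
    return out_path
```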
2,892
|
def is_gzipfile(filename):
    assert os.path.exists(filename), 'Input {} does not '.format(filename) + 'point to a file.'
    with open(filename, 'rb') as in_f:
        start_of_file = in_f.read(3)
        if start_of_file == '\x1f\x8b\x08':
            return True
        else:
            return False
|
Attempt to ascertain the gzip status of a file based on the file's magic signature.
|
2,893
|
def get_file_from_gdc(job, gdc_url, gdc_download_token, write_to_jobstore=True):
    work_dir = job.fileStore.getLocalTempDir()
    parsed_url = urlparse(gdc_url)
    assert parsed_url.scheme == 'gdc', 'Unexpected url scheme: %s' % gdc_url
    file_dir = '/'.join([work_dir, parsed_url.netloc])
    currwd = os.getcwd()
    os.chdir(work_dir)
    try:
        download_call = ['gdc-client', 'download', '-t', gdc_download_token, parsed_url.netloc]
        subprocess.check_call(download_call)
    finally:
        os.chdir(currwd)
    assert os.path.exists(file_dir)
    output_files = [os.path.join(file_dir, x) for x in os.listdir(file_dir)
                    if not x.endswith('logs')]
    if len(output_files) == 1:
        assert output_files[0].endswith('vcf')
    else:
        if not {os.path.splitext(x)[1] for x in output_files} >= {'.bam', '.bai'}:
            raise ParameterError('Can currently only handle pre-indexed GDC bams.')
        output_files = [x for x in output_files if x.endswith(('bam', 'bai'))]
        output_files = sorted(output_files, key=lambda x: os.path.splitext(x)[1], reverse=True)
    if write_to_jobstore:
        output_files = [job.fileStore.writeGlobalFile(f) for f in output_files]
    return output_files
|
Download a supplied URL that points to a file in the NCBI GDC database. The path to the GDC download token must be provided. The file is downloaded and written to the jobstore if requested.
|
2,894
|
def get_file_from_url(job, any_url, encryption_key=None, per_file_encryption=True,
                      write_to_jobstore=True):
    work_dir = job.fileStore.getLocalTempDir()
    filename = '/'.join([work_dir, str(uuid.uuid4())])
    url = any_url
    parsed_url = urlparse(any_url)
    try:
        response = urllib2.urlopen(url)
    except urllib2.HTTPError:
        if parsed_url.netloc.startswith(('s3', 'S3')):
            job.fileStore.logToMaster('Detected https link is for an encrypted s3 file.')
            return get_file_from_s3(job, any_url, encryption_key=encryption_key,
                                    per_file_encryption=per_file_encryption,
                                    write_to_jobstore=write_to_jobstore)
        else:
            raise
    else:
        with open(filename, 'w') as f:
            f.write(response.read())
    if write_to_jobstore:
        filename = job.fileStore.writeGlobalFile(filename)
    return filename
|
Download a supplied URL that points to a file on an http, https, or ftp server. If the file turns out to be an https S3 link, it is downloaded using get_file_from_s3. The file is downloaded and written to the jobstore if requested. The encryption arguments are passed through to get_file_from_s3 when required.
|
2,895
|
def bam2fastq(bamfile, univ_options, picard_options):
    work_dir = os.path.split(bamfile)[0]
    base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
    parameters = ['SamToFastq',
                  ''.join(['I=', docker_path(bamfile)]),
                  ''.join(['F=/data/', base_name, '_1.fastq']),
                  ''.join(['F2=/data/', base_name, '_2.fastq']),
                  ''.join(['FU=/data/', base_name, '_UP.fastq'])]
    docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'],
                tool_version=picard_options['version'])
    first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq'])
    assert os.path.exists(first_fastq)
    return first_fastq
|
Split an input bam into paired fastqs.
|
2,896
|
def export_results(job, fsid, file_name, univ_options, subfolder=None):
    job.fileStore.logToMaster('Exporting %s to output location' % fsid)
    file_name = os.path.basename(file_name)
    try:
        assert univ_options['output_folder'], 'Need a path to a folder to write out files'
        assert univ_options['storage_location'], ('Need to know where the files need to go. ' +
                                                  'Local or AWS/Azure, etc.')
    except AssertionError as err:
        print('ERROR:', err.message, file=sys.stderr)
        return
    if univ_options['output_folder'] == 'NA':
        output_folder = ''
    else:
        output_folder = univ_options['output_folder']
    output_folder = os.path.join(output_folder, univ_options['patient'])
    output_folder = os.path.join(output_folder, subfolder) if subfolder else output_folder
    if univ_options['storage_location'] == 'local':
        try:
            os.makedirs(output_folder, 0o755)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        output_url = 'file://' + os.path.join(output_folder, file_name)
    elif univ_options['storage_location'].startswith('aws'):
        bucket_name = univ_options['storage_location'].split(':')[-1]
        output_url = os.path.join('S3://', bucket_name, output_folder.strip('/'), file_name)
    else:
        print("Currently doesn't support anything but Local and aws.")
        return
    job.fileStore.exportFile(fsid, output_url)
|
Write out a file to a given location. The location can be either a directory on the local machine or a folder within a bucket on AWS.
|
2,897
|
def parse_chromosome_string(job, chromosome_string):
    if chromosome_string is None:
        return []
    else:
        assert isinstance(chromosome_string, str)
        chroms = [c.strip() for c in chromosome_string.split(',')]
        if 'canonical' in chroms:
            assert 'canonical_chr' not in chroms, 'Cannot have canonical and canonical_chr'
            chr_prefix = False
            chroms.remove('canonical')
            out_chroms = [str(c) for c in range(1, 23)] + ['X', 'Y']
        elif 'canonical_chr' in chroms:
            assert 'canonical' not in chroms, 'Cannot have canonical and canonical_chr'
            chr_prefix = True
            chroms.remove('canonical_chr')
            out_chroms = ['chr' + str(c) for c in range(1, 23)] + ['chrX', 'chrY']
        else:
            chr_prefix = None
            out_chroms = []
        for chrom in chroms:
            if chr_prefix is not None and chrom.startswith('chr') is not chr_prefix:
                job.fileStore.logToMaster('chromosome %s does not match the rest that %s begin '
                                          'with "chr".' % (chrom,
                                                           'all' if chr_prefix else "don't"),
                                          level=logging.WARNING)
            out_chroms.append(chrom)
        return chrom_sorted(out_chroms)
|
Parse a chromosome string into a list of chromosome names; a stripped-down sketch of the expansion rules follows this row.
|
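A stripped-down sketch of the expansion rules described above, leaving out the Toil logging and the final chrom_sorted() ordering, purely to show how the 'canonical'/'canonical_chr' keywords expand and how extra entries are appended.

```python
def expand_chromosomes(chromosome_string):
    # 'canonical' -> 1..22, X, Y; 'canonical_chr' -> chr1..chr22, chrX, chrY.
    if chromosome_string is None:
        return []
    chroms = [c.strip() for c in chromosome_string.split(',')]
    if 'canonical' in chroms:
        chroms.remove('canonical')
        out = [str(c) for c in range(1, 23)] + ['X', 'Y']
    elif 'canonical_chr' in chroms:
        chroms.remove('canonical_chr')
        out = ['chr' + str(c) for c in range(1, 23)] + ['chrX', 'chrY']
    else:
        out = []
    return out + chroms


print(expand_chromosomes('canonical, MT'))   # ['1', ..., '22', 'X', 'Y', 'MT']
print(expand_chromosomes('chr7, chr17'))     # ['chr7', 'chr17']
```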
2,898
|
def email_report(job, univ_options):
    fromadd = 'results@protect.cgl.genomics.ucsc.edu'
    msg = MIMEMultipart()
    msg['From'] = fromadd
    if univ_options['mail_to'] is None:
        return
    else:
        msg['To'] = univ_options['mail_to']
    msg['Subject'] = 'Protect run for sample %s completed successfully.' % univ_options['patient']
    body = 'Protect run for sample %s completed successfully.' % univ_options['patient']
    msg.attach(MIMEText(body, 'plain'))
    text = msg.as_string()
    try:
        server = smtplib.SMTP('localhost')
    except socket.error as e:
        if e.errno == 111:
            print('No mail utils on this machine')
        else:
            print('Unexpected error while attempting to send an email.')
        print('Could not send email report')
    except:
        print('Could not send email report')
    else:
        server.sendmail(fromadd, msg['To'], text)
        server.quit()
|
Send an email to the user when the run finishes.
|
2,899
|
def make_key_hippie(obj, typed=True):
    ftype = type if typed else lambda o: None
    if is_hashable(obj):
        return obj, ftype(obj)
    if isinstance(obj, set):
        obj = sorted(obj)
    if isinstance(obj, (list, tuple)):
        return tuple(make_key_hippie(e, typed) for e in obj)
    if isinstance(obj, dict):
        return tuple(sorted(
            ((make_key_hippie(k, typed), make_key_hippie(v, typed))
             for k, v in obj.items())))
    raise ValueError('%r can not be hashed. Try providing a custom key function.' % obj)
|
Return a hashable structure from a non-hashable structure using hippie means; a standalone variant follows this row.
|
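A small standalone variant of the same idea (ignoring the typed flag for brevity): anything unhashable is recursively rebuilt from sorted, tuple-ized pieces so that structurally equal inputs hash equally.

```python
def freeze(obj):
    # Hashable values pass through; containers are rebuilt as hashable tuples.
    try:
        hash(obj)
        return obj
    except TypeError:
        pass
    if isinstance(obj, set):
        obj = sorted(obj)
    if isinstance(obj, (list, tuple)):
        return tuple(freeze(e) for e in obj)
    if isinstance(obj, dict):
        return tuple(sorted((freeze(k), freeze(v)) for k, v in obj.items()))
    raise ValueError('%r cannot be hashed' % obj)


key = freeze({'b': [1, 2], 'a': {3, 1}})
print(key)                                                     # (('a', (1, 3)), ('b', (1, 2)))
print(hash(key) == hash(freeze({'a': {1, 3}, 'b': [1, 2]})))   # True
```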